diff --git a/python/OpenVINO_EP/yolov8_object_detection/README.md b/python/OpenVINO_EP/yolov8_object_detection/README.md
deleted file mode 100644
index df4a80c57..000000000
--- a/python/OpenVINO_EP/yolov8_object_detection/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Object detection with YOLOv8 in Python using OpenVINO™ Execution Provider
-
-1. The object detection sample uses a YOLOv8 deep learning ONNX model from Ultralytics.
-
-2. The sample presents an image to ONNX Runtime (ORT), which uses the OpenVINO™ Execution Provider to run inference on various Intel hardware devices and performs object detection, recognizing up to 80 different object classes such as birds, benches, dogs, and people.
-
-## Requirements
-For all the Python package dependencies, check the 'requirements.txt' file in the sample directory. You may also install these dependencies within a virtual environment:
-```bash
-pip3 install -r requirements.txt
-```
-
-# How to build
-## Prerequisites
-1. Download and export the YOLOv8 model from Ultralytics
-   Download the PyTorch model: wget https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt -O yolov8n.pt
-
-   Convert the PyTorch model to ONNX: yolo mode=export model=yolov8n.pt format=onnx dynamic=True
-
-## Install ONNX Runtime for OpenVINO™ Execution Provider
-Please install the onnxruntime-openvino Python package from [here](https://pypi.org/project/onnxruntime-openvino). The package for Linux contains prebuilt OpenVINO libs with ABI 0.
-```
-pip3 install onnxruntime-openvino openvino
-```
-
-## Reference Documentation
-[Documentation](https://onnxruntime.ai/docs/execution-providers/OpenVINO-ExecutionProvider.html)
-
-### How to run the sample
-```bash
-python3 yolov8.py --help
-```
-## Running the ONNX Runtime OpenVINO™ Execution Provider sample
-```bash
-python3 yolov8.py --model <path-to-onnx-model> --device OVEP
-```
-
-## References
-
-[Download OpenVINO™ Execution Provider latest pip wheels from here](https://pypi.org/project/onnxruntime-openvino/)
-
-[Python Pip Wheel Packages](https://www.intel.com/content/www/us/en/artificial-intelligence/posts/openvino-execution-provider-for-onnx-runtime.html)
-
-[Get started with ORT for Python](https://onnxruntime.ai/docs/get-started/with-python.html)
\ No newline at end of file
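The README's export and install steps can also be driven from Python. Below is a minimal sketch of that flow, assuming the ultralytics and onnxruntime-openvino packages from the README are installed; the fallback to the default CPU provider is an illustrative addition, not part of the original sample.

```python
from ultralytics import YOLO
import onnxruntime as rt

# Export the PyTorch checkpoint to ONNX (equivalent to the
# `yolo mode=export model=yolov8n.pt format=onnx dynamic=True` CLI call above)
YOLO("yolov8n.pt").export(format="onnx", dynamic=True)  # writes yolov8n.onnx

# Create an ONNX Runtime session on the OpenVINO Execution Provider,
# falling back to the default CPU provider if OVEP is unavailable
session = rt.InferenceSession(
    "yolov8n.onnx",
    providers=["OpenVINOExecutionProvider", "CPUExecutionProvider"],
)
print(session.get_providers())
```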
diff --git a/python/OpenVINO_EP/yolov8_object_detection/colab_notebook/OVEP_yolov8_colab_sample.ipynb b/python/OpenVINO_EP/yolov8_object_detection/colab_notebook/OVEP_yolov8_colab_sample.ipynb
deleted file mode 100644
index da462c1e0..000000000
--- a/python/OpenVINO_EP/yolov8_object_detection/colab_notebook/OVEP_yolov8_colab_sample.ipynb
+++ /dev/null
@@ -1,452 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "y5HNjkLDZi5N"
-   },
-   "source": [
-    "Copyright (C) 2022-2023, Intel Corporation\n",
-    "\n",
-    "SPDX-License-Identifier: Apache-2.0"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "v7M6g-P1ZtSi"
-   },
-   "source": [
-    "# Object detection with YOLOv8 in Python using OpenVINO™ Execution Provider\n",
-    "\n",
-    "1. The object detection sample uses a YOLOv8 deep learning ONNX model.\n",
-    "\n",
-    "2. The sample detects objects in an image, using the OpenVINO™ Execution Provider to run inference on various Intel hardware devices and recognizing up to 80 different object classes from the COCO dataset, such as person, bicycle, car, and motorbike."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "oGAysuZujJCK"
-   },
-   "source": [
-    "First, let's make sure we're running this notebook on a supported Intel CPU."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "id": "5KeQp-9CjKz7"
-   },
-   "outputs": [],
-   "source": [
-    "# Note: This works only on Linux!\n",
-    "!lscpu | grep name"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "Q4cX8riJWuNk"
-   },
-   "source": [
-    "## Installing packages\n",
-    "\n",
-    "In this section, we install onnxruntime-openvino, onnx, ultralytics, and the other necessary Python packages."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "id": "UPwBfDzCWmVX"
-   },
-   "outputs": [],
-   "source": [
-    "!python3 -m pip install --upgrade pip -q\n",
-    "!python3 -m pip install onnxruntime-openvino onnx opencv-python setuptools==65.5.1 ultralytics==8.0.136 -q"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "hdIH9iq4aTXt"
-   },
-   "source": [
-    "## Downloading an image for object detection and preprocessing it"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "cellView": "form",
-    "id": "5BqlWQzFbhf4"
-   },
-   "outputs": [],
-   "source": [
-    "#@title Please insert an image URL for object detection\n",
-    "\n",
-    "image_url = \"https://ultralytics.com/images/bus.jpg\" #@param [https://ultralytics.com/images/bus.jpg] {type:\"string\"}\n",
-    "print(\"Other image options: https://storage.openvinotoolkit.org/data/test_data/images/cat.jpg, https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "fYwIEFWpkFqz"
-   },
-   "source": [
-    "Importing the required packages"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "id": "XmHT7vWLkKFy"
-   },
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "import requests\n",
-    "import shutil\n",
-    "import cv2\n",
-    "import numpy as np\n",
-    "import onnxruntime as rt\n",
-    "import torch\n",
-    "from datetime import datetime\n",
-    "from ultralytics.yolo.data.augment import LetterBox"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "FvvSIzsskYbE"
-   },
-   "source": [
-    "Downloading the image from the given URL and preprocessing it for object detection (see the preprocessing cell below)"
-   ]
-  },
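The preprocessing cell below relies on ultralytics' `LetterBox` helper. For readers who want the same transform without that dependency, here is a minimal sketch of the resize-and-pad step; the function name and defaults are illustrative, and it assumes a BGR image as returned by `cv2.imread`.

```python
import cv2
import numpy as np

def letterbox(img, new_shape=(640, 640), color=(114, 114, 114)):
    """Resize while preserving aspect ratio, then pad to new_shape."""
    h, w = img.shape[:2]
    r = min(new_shape[0] / h, new_shape[1] / w)  # scale factor
    new_unpad = (int(round(w * r)), int(round(h * r)))  # (width, height) after resize
    dw = (new_shape[1] - new_unpad[0]) / 2  # padding split between left/right
    dh = (new_shape[0] - new_unpad[1]) / 2  # padding split between top/bottom
    img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    return cv2.copyMakeBorder(img, top, bottom, left, right,
                              cv2.BORDER_CONSTANT, value=color)
```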
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "id": "W5B11Gp5aZKz"
-   },
-   "outputs": [],
-   "source": [
-    "# Parameters for pre-processing\n",
-    "imgsz = (640, 640)  # default value for this use case\n",
-    "stride = 32  # default value for this use case (differs based on the model selected)\n",
-    "\n",
-    "print(image_url)\n",
-    "def preprocess(image_url):\n",
-    "    ## Set up the image URL\n",
-    "    path = os.getcwd()\n",
-    "    image_path = os.path.join(path, image_url.split(\"/\")[-1])\n",
-    "    # Open the URL image; stream=True returns the stream content.\n",
-    "    r = requests.get(image_url, stream=True)\n",
-    "    # Check if the image was retrieved successfully\n",
-    "    if r.status_code == 200:\n",
-    "        # Set decode_content to True, otherwise the downloaded image file's size will be zero.\n",
-    "        r.raw.decode_content = True\n",
-    "        # Open a local file with wb (write binary) permission.\n",
-    "        with open(image_path, 'wb') as f:\n",
-    "            shutil.copyfileobj(r.raw, f)\n",
-    "        print('Image successfully downloaded: ', path)\n",
-    "    else:\n",
-    "        print(\"Image couldn't be retrieved\")\n",
-    "        return\n",
-    "\n",
-    "    image_abs_path = os.path.abspath(image_path)\n",
-    "    if os.path.isfile(image_abs_path) and image_abs_path.split('.')[-1].lower() in ['jpg', 'jpeg', 'png']:\n",
-    "        # Load image\n",
-    "        img0 = cv2.imread(image_abs_path)\n",
-    "        # Padded resize\n",
-    "        # LetterBox: resize image and pad for detection, instance segmentation, pose\n",
-    "        img = LetterBox(imgsz, stride=stride)(image=img0.copy())\n",
-    "        # Convert\n",
-    "        img = img.transpose((2, 0, 1))[::-1]  # BGR to RGB, HWC to CHW\n",
-    "        img = np.ascontiguousarray(img)\n",
-    "        img = img.astype(np.float32)  # uint8 to fp32\n",
-    "        img /= 255.0  # 0 - 255 to 0.0 - 1.0\n",
-    "        if img.ndim == 3:\n",
-    "            img = np.expand_dims(img, axis=0)\n",
-    "        return img0, img\n",
-    "    else:\n",
-    "        print(\"Invalid image format.\")\n",
-    "        return\n",
-    "\n",
-    "org_input, model_input = preprocess(image_url)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "1LOTNlHfhjQT"
-   },
-   "source": [
-    "## Downloading the YOLOv8 model and exporting it to ONNX"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "id": "0vAlvihChxnv"
-   },
-   "outputs": [],
-   "source": [
-    "!mkdir -p model\n",
-    "if not os.path.isfile('model/yolov8m.pt'):\n",
-    "    !cd model && wget https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt -O yolov8m.pt\n",
-    "\n",
-    "# Convert the PyTorch version of the YOLOv8 model file to ONNX format\n",
-    "if not os.path.isfile('model/yolov8m.onnx'):\n",
-    "    !cd model && yolo mode=export model=yolov8m.pt format=onnx dynamic=True # To get a static model, change the dynamic flag to False\n",
-    "else:\n",
-    "    print(\"ONNX model file already exists. Please re-run the cell after removing it.\")\n",
-    "\n",
-    "print(os.getcwd())"
-   ]
-  },
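The initialization cell below references a quantized model file (`yolov8m_quantized.onnx`) that the notebook itself never creates. A minimal sketch of producing one with ONNX Runtime's dynamic quantization follows; the paths mirror the notebook, and whether dynamic quantization preserves YOLOv8 accuracy should be validated before relying on it.

```python
from onnxruntime.quantization import quantize_dynamic, QuantType

# Quantize the exported FP32 model's weights to uint8.
# Activations stay in float and are quantized dynamically at runtime.
quantize_dynamic(
    model_input="model/yolov8m.onnx",
    model_output="model/yolov8m_quantized.onnx",
    weight_type=QuantType.QUInt8,
)
```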
"input_names, output_names, mlas_model, ov_model = initialize(device=device)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IqI1dSmboO1r" - }, - "source": [ - "##Inference" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "id": "h1tYkwGQnKtY" - }, - "outputs": [], - "source": [ - "#@title Select number of iterations for inference\n", - "\n", - "#No of iterations always greater than warmup iterations\n", - "\n", - "no_of_iterations = 20 #@param {type:\"slider\", min:20, max:100, step:5}\n", - "warmup_iterations = 10 #@param {type:\"slider\", min:10, max:15, step:1}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "bVFDmjaQoQhP" - }, - "outputs": [], - "source": [ - "inf_lst = []\n", - "def inference(input_names, output_names, device, mlas_model, ovep_model, model_input):\n", - " if device == 'CPUEP':\n", - " print(\"Performing ONNX Runtime Inference with default CPU EP.\")\n", - " for i in range(no_of_iterations):\n", - " start_time = datetime.now()\n", - " prediction = mlas_model.run(output_names, {input_names: model_input})\n", - " end_time = datetime.now()\n", - " # print((end_time - start_time).total_seconds())\n", - " if i > warmup_iterations:\n", - " inf_lst.append((end_time - start_time).total_seconds())\n", - " elif device == 'OVEP':\n", - " print(\"Performing ONNX Runtime Inference with OpenVINO EP.\")\n", - " for i in range(no_of_iterations):\n", - " start_time = datetime.now()\n", - " prediction = ovep_model.run(output_names, {input_names: model_input})\n", - " end_time = datetime.now()\n", - " # print((end_time - start_time).total_seconds())\n", - " if i > warmup_iterations:\n", - " inf_lst.append((end_time - start_time).total_seconds())\n", - " else:\n", - " print(\"Invalid Device Option. 
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "id": "bVFDmjaQoQhP"
-   },
-   "outputs": [],
-   "source": [
-    "inf_lst = []\n",
-    "def inference(input_names, output_names, device, mlas_model, ovep_model, model_input):\n",
-    "    if device == 'CPUEP':\n",
-    "        print(\"Performing ONNX Runtime inference with the default CPU EP.\")\n",
-    "        model = mlas_model\n",
-    "    elif device == 'OVEP':\n",
-    "        print(\"Performing ONNX Runtime inference with the OpenVINO EP.\")\n",
-    "        model = ovep_model\n",
-    "    else:\n",
-    "        print(\"Invalid device option. Supported device options are 'CPUEP', 'OVEP'.\")\n",
-    "        return None\n",
-    "    for i in range(no_of_iterations):\n",
-    "        start_time = datetime.now()\n",
-    "        prediction = model.run(output_names, {input_names: model_input})\n",
-    "        end_time = datetime.now()\n",
-    "        if i >= warmup_iterations:\n",
-    "            inf_lst.append((end_time - start_time).total_seconds())\n",
-    "    return prediction, (end_time - start_time).total_seconds()\n",
-    "\n",
-    "inference_output = inference(input_names, output_names, device, mlas_model, ov_model, model_input)\n",
-    "average_inference_time = np.average(inf_lst)\n",
-    "print(f'Average inference time over {no_of_iterations - warmup_iterations} iterations is {average_inference_time} sec')"
-   ]
-  },
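The postprocessing cell that follows delegates box filtering to ultralytics' `ops.non_max_suppression`. For intuition about what that call does, here is a minimal NumPy sketch of greedy NMS; it is a simplification that assumes boxes are already in xyxy format with one score per box.

```python
import numpy as np

def nms(boxes, scores, iou_thr=0.45):
    """Greedy non-max suppression over xyxy boxes; returns kept indices."""
    order = scores.argsort()[::-1]  # highest-scoring boxes first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # IoU of the top box against all remaining boxes
        xx1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
        yy1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
        xx2 = np.minimum(boxes[i, 2], boxes[order[1:], 2])
        yy2 = np.minimum(boxes[i, 3], boxes[order[1:], 3])
        inter = np.clip(xx2 - xx1, 0, None) * np.clip(yy2 - yy1, 0, None)
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        areas = (boxes[order[1:], 2] - boxes[order[1:], 0]) * \
                (boxes[order[1:], 3] - boxes[order[1:], 1])
        iou = inter / (area_i + areas - inter)
        order = order[1:][iou <= iou_thr]  # drop boxes overlapping the kept one
    return keep
```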
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "T1F3WqTCxA3v"
-   },
-   "source": [
-    "## Postprocess"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "id": "m2fxUSilxC-I"
-   },
-   "outputs": [],
-   "source": [
-    "from ultralytics.yolo.engine.results import Results\n",
-    "from ultralytics.yolo.utils import ops\n",
-    "from ultralytics.yolo.utils.plotting import Annotator, colors\n",
-    "from google.colab.patches import cv2_imshow\n",
-    "import os\n",
-    "import torch\n",
-    "\n",
-    "# Parameters for post-processing\n",
-    "conf = 0.25\n",
-    "iou = 0.45\n",
-    "max_det = 300\n",
-    "classes = None\n",
-    "agnostic = False\n",
-    "labels = {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat',\n",
-    "          9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat',\n",
-    "          16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack',\n",
-    "          25: 'umbrella', 26: 'handbag', 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball',\n",
-    "          33: 'kite', 34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle',\n",
-    "          40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', 48: 'sandwich',\n",
-    "          49: 'orange', 50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch',\n",
-    "          58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote',\n",
-    "          66: 'keyboard', 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book',\n",
-    "          74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'}\n",
-    "path = os.getcwd()\n",
-    "\n",
-    "def postprocess(img0, img, inference_output):\n",
-    "    if inference_output is not None:\n",
-    "        prediction = inference_output[0]\n",
-    "        inference_time = inference_output[1]\n",
-    "\n",
-    "        prediction = [torch.from_numpy(pred) for pred in prediction]\n",
-    "        preds = ops.non_max_suppression(prediction,\n",
-    "                                        conf,\n",
-    "                                        iou,\n",
-    "                                        agnostic=agnostic,\n",
-    "                                        max_det=max_det,\n",
-    "                                        classes=classes)\n",
-    "        log_string = ''\n",
-    "        results = []\n",
-    "        for _, pred in enumerate(preds):\n",
-    "            pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], img0.shape).round()\n",
-    "            results.append(Results(img0, path, labels, boxes=pred))\n",
-    "\n",
-    "        det = results[0].boxes\n",
-    "        if len(det) == 0:\n",
-    "            return log_string + 'No detection found.'\n",
-    "        for c in det.cls.unique():\n",
-    "            n = (det.cls == c).sum()  # detections per class\n",
-    "            log_string += f\"{n} {labels[int(c)]}{'s' * (n > 1)}, \"\n",
-    "\n",
-    "        raw_output = ''\n",
-    "        annotator = Annotator(img0, pil=False)\n",
-    "        for d in reversed(det):\n",
-    "            cls, conf_score = d.cls.squeeze(), d.conf.squeeze()\n",
-    "            c = int(cls)  # integer class\n",
-    "            name = f'id:{int(d.id.item())} {labels[c]}' if d.id is not None else labels[c]\n",
-    "            label = f'{name} {conf_score:.2f}'\n",
-    "            box = d.xyxy.squeeze().tolist()\n",
-    "            p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))\n",
-    "            raw_output += f\"name: {name}, confidence: {conf_score:.2f}, start_point: {p1}, end_point: {p2}\\n\"\n",
-    "            annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))\n",
-    "\n",
-    "        result_img = annotator.result()\n",
-    "        cv2_imshow(result_img)\n",
-    "\n",
-    "        return [f\"inference_time: {inference_time}s\\nInference_summary: {log_string}\\nraw_output:\\n{raw_output}\"]\n",
-    "    return None\n",
-    "\n",
-    "result = postprocess(org_input, model_input, inference_output)"
-   ]
-  }
- ],
- "metadata": {
-  "colab": {
-   "collapsed_sections": [
-    "Q4cX8riJWuNk"
-   ],
-   "provenance": []
-  },
-  "kernelspec": {
-   "display_name": "Python 3",
-   "name": "python3"
-  },
-  "language_info": {
-   "name": "python"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/python/OpenVINO_EP/yolov8_object_detection/requirements.txt b/python/OpenVINO_EP/yolov8_object_detection/requirements.txt
deleted file mode 100644
index 1dfa8475f..000000000
--- a/python/OpenVINO_EP/yolov8_object_detection/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-ultralytics==8.0.100
-onnxruntime-openvino
-onnx
-setuptools==70.0.0
\ No newline at end of file
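Before running the script below, it can be worth confirming that the OpenVINO Execution Provider is actually registered in the installed onnxruntime build; the onnxruntime-openvino wheel replaces the stock onnxruntime package. A quick check using the standard ONNX Runtime API:

```python
import onnxruntime as rt

print(rt.__version__)
print(rt.get_available_providers())
# Expect 'OpenVINOExecutionProvider' in the list when onnxruntime-openvino
# is installed; otherwise sessions requesting it will fail or fall back.
```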
diff --git a/python/OpenVINO_EP/yolov8_object_detection/yolov8.py b/python/OpenVINO_EP/yolov8_object_detection/yolov8.py
deleted file mode 100644
index 94e82a911..000000000
--- a/python/OpenVINO_EP/yolov8_object_detection/yolov8.py
+++ /dev/null
@@ -1,239 +0,0 @@
-import os
-import sys
-import shutil
-import argparse
-import platform
-from datetime import datetime
-
-import requests
-import torch
-
-# Libraries for pre- and post-processing
-from ultralytics.yolo.data.augment import LetterBox
-from ultralytics.yolo.engine.results import Results
-from ultralytics.yolo.utils import ops, yaml_load
-from ultralytics.yolo.utils.checks import check_yaml
-from ultralytics.yolo.utils.plotting import Annotator, colors
-
-# Import ONNX Runtime related packages
-import onnxruntime as rt
-import numpy as np
-import cv2
-
-# On Windows, the OpenVINO libraries shipped with onnxruntime-openvino
-# must be added to the DLL search path before creating a session.
-if platform.system() == "Windows":
-    import onnxruntime.tools.add_openvino_win_libs as utils
-    utils.add_openvino_libs_to_path()
-
-CLASSES = yaml_load(check_yaml('coco128.yaml'))['names']
-
-def parse_arguments():
-    parser = argparse.ArgumentParser(description='Object detection with YOLOv8 using the OpenVINO Execution Provider for ONNX Runtime')
-
-    parser.add_argument('--device', default='OVEP', help="Execution provider: 'CPUEP' for the default CPU (MLAS) provider or 'OVEP' for the OpenVINO Execution Provider.")
-
-    parser.add_argument('--model', required=True, help='Path to the ONNX model.')
-
-    parser.add_argument('--image_url', default='https://storage.openvinotoolkit.org/data/test_data/images/cat.jpg', help='URL of the image to download for object detection. \
-                        Other image options: \
-                        https://storage.openvinotoolkit.org/data/test_data/images/dog.jpg \
-                        https://storage.openvinotoolkit.org/data/test_data/images/banana.jpg \
-                        https://storage.openvinotoolkit.org/data/test_data/images/apple.jpg \
-                        https://storage.openvinotoolkit.org/data/test_data/images/car.png')
-
-    parser.add_argument('--niter', default=30, type=int, help='total number of iterations')
-
-    parser.add_argument('--warmup_iter', default=10, type=int, help='number of warmup iterations')
-
-    parser.add_argument('--show_image', help='Optional. Show the image with the detected objects.', action='store_true')
-
-    args = parser.parse_args()
-    return args
-
-
-def initialize(model_path, device='OVEP'):
-    "Initialize the model and get the model input and output names."
-    so = rt.SessionOptions()
-
-    if device == 'OVEP':
-        print("Creating session for OVEP")
-        session = rt.InferenceSession(model_path, so,
-                                      providers=['OpenVINOExecutionProvider'],
-                                      provider_options=[{'device_type': 'CPU_FP32'}])
-    else:
-        print("Creating session for CPUEP")
-        session = rt.InferenceSession(model_path, so, providers=['CPUExecutionProvider'])
-
-    input_names = session.get_inputs()[0].name
-    outputs = session.get_outputs()
-
-    output_names = list(map(lambda output: output.name, outputs))
-    return input_names, output_names, session
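`initialize()` pins the OpenVINO EP to `'CPU_FP32'`. Other `device_type` values are available depending on the installed onnxruntime-openvino release; the sketch below targets an Intel GPU instead. The option strings shown are assumptions that vary by version, so check the OpenVINO Execution Provider documentation for your install.

```python
import onnxruntime as rt

so = rt.SessionOptions()
# 'GPU_FP32' is the legacy option string; newer onnxruntime-openvino releases
# instead accept e.g. {'device_type': 'GPU', 'precision': 'FP32'}.
session = rt.InferenceSession(
    "model/yolov8m.onnx",  # path follows the notebook above; adjust as needed
    so,
    providers=["OpenVINOExecutionProvider"],
    provider_options=[{"device_type": "GPU_FP32"}],
)
```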
-def preprocess(image_url):
-    ## Set up the image URL and filename
-    path = os.getcwd()
-    image_path = os.path.join(path, image_url.split("/")[-1])
-
-    # Open the URL image; stream=True returns the stream content.
-    r = requests.get(image_url, stream=True)
-
-    # Check if the image was retrieved successfully
-    if r.status_code == 200:
-        # Set decode_content to True, otherwise the downloaded image file's size will be zero.
-        r.raw.decode_content = True
-
-        # Open a local file with wb (write binary) permission.
-        with open(image_path, 'wb') as f:
-            shutil.copyfileobj(r.raw, f)
-
-        print('Image successfully downloaded: ', path)
-    else:
-        print("Image couldn't be retrieved")
-        return
-
-    image_abs_path = os.path.abspath(image_path)
-    if os.path.isfile(image_abs_path) and image_abs_path.split('.')[-1].lower() in ['jpg', 'jpeg', 'png']:
-
-        # Load image
-        img0 = cv2.imread(image_abs_path)
-
-        # Padded resize
-        # LetterBox: resize image and pad for detection, instance segmentation, pose
-        img = LetterBox(imgsz, stride=stride)(image=img0.copy())
-
-        # Convert
-        img = img.transpose((2, 0, 1))[::-1]  # BGR to RGB, HWC to CHW
-        img = np.ascontiguousarray(img)
-
-        img = img.astype(np.float32)  # uint8 to fp32
-        img /= 255.0  # 0 - 255 to 0.0 - 1.0
-
-        if img.ndim == 3:
-            img = np.expand_dims(img, axis=0)
-        return img0, img
-    else:
-        print("Invalid image format.")
-        return
-
-def inference(input_names, output_names, model, model_input, no_of_iterations, warmup_iter):
-    inf_lst = []
-
-    for i in range(no_of_iterations):
-        start_time = datetime.now()
-        prediction = model.run(output_names, {input_names: model_input})
-        end_time = datetime.now()
-        if i >= warmup_iter:
-            inf_lst.append((end_time - start_time).total_seconds())
-
-    average_inference_time = np.average(inf_lst)
-    print(f'Average inference time over {no_of_iterations - warmup_iter} iterations is {average_inference_time} sec')
-    return prediction, (end_time - start_time).total_seconds()
-
-def postprocess(img0, img, inference_output):
-    if inference_output is not None:
-        prediction = inference_output[0]
-        inference_time = inference_output[1]
-
-        prediction = [torch.from_numpy(pred) for pred in prediction]
-        preds = ops.non_max_suppression(prediction,
-                                        conf,
-                                        iou,
-                                        agnostic=agnostic,
-                                        max_det=max_det,
-                                        classes=classes)
-        log_string = ''
-        results = []
-        for _, pred in enumerate(preds):
-            pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], img0.shape).round()
-            results.append(Results(img0, path, labels, boxes=pred))
-
-        det = results[0].boxes
-
-        if len(det) == 0:
-            return log_string + 'No detection found.'
-        for c in det.cls.unique():
-            n = (det.cls == c).sum()  # detections per class
-            log_string += f"{n} {labels[int(c)]}{'s' * (n > 1)}, "
-
-        raw_output = ''
-        annotator = Annotator(img0, pil=False)
-        for d in reversed(det):
-            cls, conf_score = d.cls.squeeze(), d.conf.squeeze()
-            c = int(cls)  # integer class
-            name = f'id:{int(d.id.item())} {labels[c]}' if d.id is not None else labels[c]
-            label = f'{name} {conf_score:.2f}'
-            box = d.xyxy.squeeze().tolist()
-            p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
-            raw_output += f"name: {name}, confidence: {conf_score:.2f}, start_point: {p1}, end_point: {p2}\n"
-            annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))
-
-        result_img = annotator.result()
-        if args.show_image:
-            cv2.imshow('image', result_img)
-            cv2.waitKey(0)
-            cv2.destroyAllWindows()
-
-        return [f"inference_time: {inference_time}s\nInference_summary: {log_string}\nraw_output:\n{raw_output}"]
-    return None
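`postprocess()` relies on `ops.scale_boxes` to map detections from the letterboxed network input back onto the original image. For intuition, here is a minimal NumPy version of that inverse mapping; it is a simplification of the ultralytics helper and assumes the padding was centered, as in the letterbox transform above.

```python
import numpy as np

def scale_boxes_manual(model_shape, boxes, orig_shape):
    """Map xyxy boxes from the letterboxed model input back to the original image.

    model_shape: (h, w) of the network input, e.g. (640, 640)
    orig_shape:  (h, w) of the original image
    """
    gain = min(model_shape[0] / orig_shape[0], model_shape[1] / orig_shape[1])
    pad_x = (model_shape[1] - orig_shape[1] * gain) / 2  # horizontal padding
    pad_y = (model_shape[0] - orig_shape[0] * gain) / 2  # vertical padding
    boxes = boxes.copy()
    boxes[:, [0, 2]] = (boxes[:, [0, 2]] - pad_x) / gain  # undo pad/scale on x1, x2
    boxes[:, [1, 3]] = (boxes[:, [1, 3]] - pad_y) / gain  # undo pad/scale on y1, y2
    boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, orig_shape[1])  # clamp to image
    boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, orig_shape[0])
    return boxes
```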
-if __name__ == "__main__":
-
-    # Process arguments
-    args = parse_arguments()
-    no_of_iterations = args.niter
-    warmup_iter = args.warmup_iter
-    device = args.device
-    original_model_path = args.model
-
-    print("device:", device)
-
-    if warmup_iter >= no_of_iterations:
-        sys.exit("The number of warmup iterations must be smaller than the total number of iterations (niter)!")
-
-    # Parameters for pre-processing
-    imgsz = (640, 640)  # default value for this use case
-    stride = 32  # default value for this use case (differs based on the model selected)
-
-    # Parameters for post-processing
-    conf = 0.25
-    iou = 0.45
-    max_det = 300
-    classes = None
-    agnostic = False
-    labels = {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign',
-              12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack',
-              25: 'umbrella', 26: 'handbag', 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove',
-              36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple',
-              48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed',
-              60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink',
-              72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'}
-
-    path = os.getcwd()
-
-    org_input, model_input = preprocess(args.image_url)
-
-    if device == 'CPUEP':
-        print("Starting ONNX Runtime inference with the default CPU EP.")
-    elif device == 'OVEP':
-        print("Starting ONNX Runtime inference with OVEP.")
-    else:
-        sys.exit("Invalid device option. Supported device options are 'CPUEP', 'OVEP'.")
-
-    input_names, output_names, model = initialize(model_path=original_model_path, device=device)
-    inference_output = inference(input_names, output_names, model, model_input, no_of_iterations, warmup_iter)
-
-    result = postprocess(org_input, model_input, inference_output)
\ No newline at end of file
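For reference, a typical end-to-end run of this sample looks like the following; the yolov8n model name follows the README, and any exported YOLOv8 ONNX model should work in its place.

```bash
# Export the model once, then benchmark it on both execution providers
wget https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt -O yolov8n.pt
yolo mode=export model=yolov8n.pt format=onnx dynamic=True

python3 yolov8.py --model yolov8n.onnx --device OVEP --niter 30 --warmup_iter 10
python3 yolov8.py --model yolov8n.onnx --device CPUEP --show_image
```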