New API ONNXRT example update #187

Merged
merged 44 commits from new_api_onnx_example into master on Dec 27, 2022

Changes from all commits

Commits (44)
7b2c5fc
update example for new API
yuwenzho Nov 29, 2022
e8beaed
update example for new API
yuwenzho Nov 29, 2022
a972603
update example for new API
yuwenzho Nov 29, 2022
3c0291d
update example for new API
yuwenzho Nov 29, 2022
7e09b64
update onnx example
yuwenzho Dec 7, 2022
b1a5c29
Merge branch 'master' into new_api_onnx_example
yuwenzho Dec 7, 2022
80ecba6
update onnx example
yuwenzho Dec 7, 2022
533903d
update onnxrt example link
yuwenzho Dec 7, 2022
981d175
remove onnrt example with old API
yuwenzho Dec 7, 2022
e99f41a
update onnxrt example
yuwenzho Dec 8, 2022
c665e8f
fix conflict
yuwenzho Dec 9, 2022
151c5f3
update onnxrt example params
yuwenzho Dec 9, 2022
b7305f4
update onnxrt example link
yuwenzho Dec 9, 2022
96fb672
fix typo
yuwenzho Dec 13, 2022
51f75fa
Merge branch 'master' into new_api_onnx_example
yuwenzho Dec 16, 2022
202ee5a
update onnxrt example batch size
yuwenzho Dec 16, 2022
15c4863
update inc_dict.txt
yuwenzho Dec 16, 2022
d93ea26
fix example bug
yuwenzho Dec 19, 2022
31aa991
fix example bug
yuwenzho Dec 19, 2022
19dd927
fix batch size bug
yuwenzho Dec 19, 2022
e311f57
Merge branch 'master' into new_api_onnx_example
yuwenzho Dec 19, 2022
ef975dd
update numpy dtype
yuwenzho Dec 19, 2022
89fa234
update version
yuwenzho Dec 19, 2022
e6407c5
fix dataloader error
yuwenzho Dec 19, 2022
e4f4b5e
fix conflicts
yuwenzho Dec 19, 2022
667fb3d
fix example error
yuwenzho Dec 20, 2022
8d755eb
Merge branch 'master' into new_api_onnx_example
yuwenzho Dec 20, 2022
3ac23d7
fix quantize bug
yuwenzho Dec 21, 2022
6465e4d
Merge branch 'master' into new_api_onnx_example
yuwenzho Dec 21, 2022
42b202c
fix typo
yuwenzho Dec 21, 2022
31bbb9b
Merge branch 'master' into new_api_onnx_example
yuwenzho Dec 21, 2022
9603b43
update ort example & fix typo
yuwenzho Dec 22, 2022
7fb5e57
Merge branch 'master' into new_api_onnx_example
yuwenzho Dec 22, 2022
20a22fc
update model config and link
yuwenzho Dec 22, 2022
ea851f5
remove tiny_yolov3, yolov3, yolov4
yuwenzho Dec 22, 2022
689cf19
remove ort mask-rcnn, faster-rcnn, ssd
yuwenzho Dec 23, 2022
a6ff8c8
update onnxrt example
yuwenzho Dec 23, 2022
a3d7ac0
update ort example code
yuwenzho Dec 23, 2022
8b0e8b7
Merge branch 'master' into new_api_onnx_example
yuwenzho Dec 23, 2022
dcfe31a
fix example error
yuwenzho Dec 23, 2022
65fbc05
Merge branch 'master' into new_api_onnx_example
yuwenzho Dec 23, 2022
db7caf6
fix ort example benchmark
yuwenzho Dec 24, 2022
765b894
Merge branch 'master' into new_api_onnx_example
yuwenzho Dec 24, 2022
828dfa6
fix performance benchmark
yuwenzho Dec 25, 2022
1 change: 1 addition & 0 deletions .azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt
@@ -2462,6 +2462,7 @@ WeightPruningConfig
Namhoon
Thalaiyasingam
Torr
QOperator
MixedPrecisionConfig
ONNXConfig
Arial
114 changes: 37 additions & 77 deletions examples/.config/model_params_onnxrt.json
@@ -117,32 +117,26 @@
"batch_size": 1,
"new_benchmark": true
},
"bert_squad_model_zoo": {
"bert_squad_model_zoo_dynamic": {
"model_src_dir": "nlp/onnx_model_zoo/bert-squad/quantization/ptq",
"dataset_location": "/tf_dataset2/datasets/squad",
"input_model": "/tf_dataset2/models/onnx/bert_squad/bert_squad_model_zoo.onnx",
"yaml": "bert.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 1
},
"mobilebert_squad_mlperf": {
"mobilebert_squad_mlperf_dynamic": {
"model_src_dir": "nlp/onnx_model_zoo/mobilebert/quantization/ptq",
"dataset_location": "/tf_dataset2/datasets/squad",
"input_model": "/tf_dataset2/models/onnx/mobilebert_squad/mobilebert_squad_mlperf.onnx",
"yaml": "mobilebert.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 1
},
"gpt2_lm_head_wikitext_model_zoo": {
"gpt2_lm_head_wikitext_model_zoo_dynamic": {
"model_src_dir": "nlp/onnx_model_zoo/gpt2/quantization/ptq",
"dataset_location": "/tf_dataset2/datasets/wikitext/wikitext-2-raw/",
"input_model": "/tf_dataset2/models/onnx/gpt2/gpt2_lm_head_wikitext_model_zoo.onnx",
"yaml": "gpt2.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": false
"main_script": "gpt2.py",
"batch_size": 1
},
"vgg16_model_zoo": {
"model_src_dir": "image_recognition/onnx_model_zoo/vgg16/quantization/ptq",
@@ -414,23 +408,12 @@
"batch_size": 1,
"new_benchmark": true
},
"bert_squad_model_zoo_qdq": {
"model_src_dir": "nlp/onnx_model_zoo/bert-squad/quantization/ptq",
"dataset_location": "/tf_dataset2/datasets/squad",
"input_model": "/tf_dataset2/models/onnx/bert_squad/bert_squad_model_zoo.onnx",
"yaml": "bert_qdq.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
},
"mobilebert_squad_mlperf_qdq": {
"model_src_dir": "nlp/onnx_model_zoo/mobilebert/quantization/ptq",
"dataset_location": "/tf_dataset2/datasets/squad",
"input_model": "/tf_dataset2/models/onnx/mobilebert_squad/mobilebert_squad_mlperf-13.onnx",
"yaml": "mobilebert_qdq.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 1
},
"vgg16_model_zoo_qdq": {
"model_src_dir": "image_recognition/onnx_model_zoo/vgg16/quantization/ptq",
@@ -589,10 +572,8 @@
"model_src_dir": "object_detection/onnx_model_zoo/DUC/quantization/ptq",
"dataset_location": "/tf_dataset2/datasets/leftImg8bit/val",
"input_model": "/tf_dataset2/models/onnx/DUC/ResNet101-DUC-12.onnx",
"yaml": "DUC.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 1
},
"arcface": {
"model_src_dir": "image_recognition/onnx_model_zoo/arcface/quantization/ptq",
@@ -630,104 +611,83 @@
"batch_size": 1,
"new_benchmark": true
},
"BiDAF": {
"BiDAF_dynamic": {
"model_src_dir": "nlp/onnx_model_zoo/BiDAF/quantization/ptq",
"dataset_location": "/tf_dataset2/datasets/squad/dev-v1.1.json",
"input_model": "/tf_dataset2/models/onnx/BiDAF/bidaf-11.onnx",
"yaml": "bidaf.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 1
},
"hf_bert-base-uncased_dynamic": {
"model_src_dir": "nlp/huggingface_model/text_classification/quantization/ptq",
"dataset_location": "/tf_dataset/pytorch/glue_data/MRPC",
"input_model": "/tf_dataset2/models/onnx/hf_bert-base-uncased_dynamic/bert-base-uncased-mrpc.onnx",
"yaml": "glue_dynamic.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 8
},
"hf_roberta-base_dynamic": {
"model_src_dir": "nlp/huggingface_model/text_classification/quantization/ptq",
"dataset_location": "/tf_dataset/pytorch/glue_data/MRPC",
"input_model": "/tf_dataset2/models/onnx/hf_roberta-base_dynamic/roberta-base-mrpc.onnx",
"yaml": "glue_dynamic.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 8
},
"hf_xlm-roberta-base_dynamic": {
"model_src_dir": "nlp/huggingface_model/text_classification/quantization/ptq",
"dataset_location": "/tf_dataset/pytorch/glue_data/MRPC",
"input_model": "/tf_dataset2/models/onnx/hf_xlm-roberta-base_dynamic/xlm-roberta-base-mrpc.onnx",
"yaml": "glue_dynamic.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 8
},
"hf_camembert-base_dynamic": {
"model_src_dir": "nlp/huggingface_model/text_classification/quantization/ptq",
"dataset_location": "/tf_dataset/pytorch/glue_data/MRPC",
"input_model": "/tf_dataset2/models/onnx/hf_camembert-base_dynamic/camembert-base-mrpc.onnx",
"yaml": "glue_dynamic.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 8
},
"hf_MiniLM-L12-H384-uncased_dynamic": {
"model_src_dir": "nlp/huggingface_model/text_classification/quantization/ptq",
"dataset_location": "/tf_dataset/pytorch/glue_data/MRPC",
"input_model": "/tf_dataset2/models/onnx/hf_MiniLM-L12-H384-uncased_dynamic/MiniLM-L12-H384-uncased-mrpc.onnx",
"yaml": "glue_dynamic.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 8
},
"hf_distilbert-base-uncased_dynamic": {
"model_src_dir": "nlp/huggingface_model/text_classification/quantization/ptq",
"dataset_location": "/tf_dataset/pytorch/glue_data/SST-2/",
"input_model": "/tf_dataset2/models/onnx/hf_distilbert-base-uncased_dynamic/distilbert-base-uncased-finetuned-sst-2-english.onnx",
"yaml": "glue_dynamic.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 8
},
"hf_albert-base-v2_dynamic": {
"model_src_dir": "nlp/huggingface_model/text_classification/quantization/ptq",
"dataset_location": "/tf_dataset/pytorch/glue_data/SST-2/",
"input_model": "/tf_dataset2/models/onnx/hf_albert-base-v2_dynamic/albert-base-v2-sst2.onnx",
"yaml": "glue_dynamic.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 8
},
"hf_MiniLM-L6-H384-uncased_dynamic": {
"model_src_dir": "nlp/huggingface_model/text_classification/quantization/ptq",
"dataset_location": "/tf_dataset/pytorch/glue_data/SST-2/",
"input_model": "/tf_dataset2/models/onnx/hf_MiniLM-L6-H384-uncased_dynamic/MiniLM-L6-H384-uncased-sst2.onnx",
"yaml": "glue_dynamic.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 8
},
"hf_spanbert_dynamic": {
"model_src_dir": "nlp/huggingface_model/question_answering/quantization/ptq",
"dataset_location": "/tf_dataset2/datasets/squad",
"input_model": "/tf_dataset2/models/onnx/hf_spanbert_dynamic/spanbert-finetuned-squadv1.onnx",
"yaml": "qa_dynamic.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 1
},
"hf_bert-base-multilingual-cased_dynamic": {
"model_src_dir": "nlp/huggingface_model/question_answering/quantization/ptq",
"dataset_location": "/tf_dataset2/datasets/squad",
"input_model": "/tf_dataset2/models/onnx/hf_bert-base-multilingual-cased_dynamic/bert-base-multilingual-cased-finetuned-squad.onnx",
"yaml": "qa_dynamic.yaml",
"strategy": "basic",
"batch_size": 1,
"new_benchmark": true
"main_script": "main.py",
"batch_size": 1
}
}
}
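Note that every updated entry drops the "yaml", "strategy", and "new_benchmark" fields in favor of a single "main_script", which suggests the runs are now driven by the 2.x Python APIs rather than YAML files. A minimal benchmarking sketch under that assumption (the model path and `DummyDataloader`, including its yield format, are placeholders; the real examples pass their own evaluation dataloader):

```python
# Sketch of the 2.x benchmark entry point; assumes the public BenchmarkConfig
# and neural_compressor.benchmark.fit interfaces. The dataloader is a stand-in
# whose yield format must match the target model's inputs.
import numpy as np
from neural_compressor.benchmark import fit
from neural_compressor.config import BenchmarkConfig


class DummyDataloader:
    batch_size = 1

    def __iter__(self):
        for _ in range(100):
            # (inputs, label) pairs; input names and shapes here are illustrative only.
            yield {"input_ids": np.zeros((1, 128), dtype=np.int64)}, None


conf = BenchmarkConfig(warmup=5, iteration=100, cores_per_instance=4, num_of_instance=1)
fit("bert_squad_model_zoo_int8.onnx", conf, b_dataloader=DummyDataloader())
```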

34 changes: 17 additions & 17 deletions examples/README.md
@@ -893,49 +893,49 @@ Intel® Neural Compressor validated examples with multiple compression technique
<td>BERT base MRPC</td>
<td>Natural Language Processing</td>
<td>Post-Training Static Quantization</td>
<td><a href="./onnxrt/nlp/bert/quantization/ptq">integerops</a> / <a href="./onnxrt/nlp/bert/quantization/ptq">qdq</a></td>
<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/nlp/bert/quantization/ptq">integerops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/nlp/bert/quantization/ptq">qdq</a></td>
</tr>
<tr>
<td>BERT base MRPC</td>
<td>Natural Language Processing</td>
<td>Post-Training Dynamic Quantization</td>
<td><a href="./onnxrt/nlp/bert/quantization/ptq">integerops</a></td>
<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/nlp/bert/quantization/ptq">integerops</a></td>
</tr>
<tr>
<td>DistilBERT base MRPC</td>
<td>Natural Language Processing</td>
<td>Post-Training Dynamic / Static Quantization</td>
<td><a href="./onnxrt/nlp/distilbert/quantization/ptq">integerops</a> / <a href="./onnxrt/nlp/distilbert/quantization/ptq">qdq</a></td>
<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/nlp/distilbert/quantization/ptq">integerops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/nlp/distilbert/quantization/ptq">qdq</a></td>
</tr>
<tr>
<td>Mobile bert MRPC</td>
<td>Natural Language Processing</td>
<td>Post-Training Dynamic / Static Quantization</td>
<td><a href="./onnxrt/nlp/mobilebert/quantization/ptq">integerops</a> / <a href="./onnxrt/nlp/mobilebert/quantization/ptq">qdq</a></td>
<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/nlp/mobilebert/quantization/ptq">integerops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/nlp/mobilebert/quantization/ptq">qdq</a></td>
</tr>
<tr>
<td>Roberta base MRPC</td>
<td>Natural Language Processing</td>
<td>Post-Training Dynamic / Static Quantization</td>
<td><a href="./onnxrt/nlp/roberta/quantization/ptq">integerops</a> / <a href="./onnxrt/nlp/roberta/quantization/ptq">qdq</a></td>
<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/nlp/roberta/quantization/ptq">integerops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/nlp/roberta/quantization/ptq">qdq</a></td>
</tr>
<tr>
<td>BERT SQuAD</td>
<td>Natural Language Processing</td>
<td>Post-Training Dynamic / Static Quantization</td>
<td><a href="./onnxrt/nlp/bert-squad/quantization/ptq">integerops</a> / <a href="./onnxrt/nlp/bert-squad/quantization/ptq">qdq</a></td>
<td><a href="./onnxrt/nlp/onnx_model_zoo/bert-squad/quantization/ptq">integerops</a> / <a href="./onnxrt/nlp/onnx_model_zoo/bert-squad/quantization/ptq">qdq</a></td>
</tr>
<tr>
<td>GPT2 lm head WikiText</td>
<td>Natural Language Processing</td>
<td>Post-Training Dynamic Quantization</td>
<td><a href="./onnxrt/nlp/gpb2/quantization/ptq">integerops</a></td>
<td><a href="./onnxrt/nlp/onnx_model_zoo/gpt2/quantization/ptq">integerops</a></td>
</tr>
<tr>
<td>MobileBERT SQuAD MLPerf</td>
<td>Natural Language Processing</td>
<td>Post-Training Dynamic / Static Quantization</td>
<td><a href="./onnxrt/nlp/mobilebert/quantization/ptq">integerops</a> / <a href="./onnxrt/nlp/mobilebert/quantization/ptq">qdq</a></td>
<td><a href="./onnxrt/nlp/onnx_model_zoo/mobilebert/quantization/ptq">integerops</a> / <a href="./onnxrt/nlp/onnx_model_zoo/mobilebert/quantization/ptq">qdq</a></td>
</tr>
<tr>
<td>BiDAF</td>
@@ -1007,19 +1007,19 @@ Intel® Neural Compressor validated examples with multiple compression technique
<td>SSD MobileNet V1</td>
<td>Object Detection</td>
<td>Post-Training Static Quantization</td>
<td><a href="./onnxrt/object_detection/ssd_mobilenet_v1/quantization/ptq">qlinearops</a> / <a href="./onnxrt/object_detection/ssd_mobilenet_v1/quantization/ptq">qdq</a></td>
<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/ssd_mobilenet_v1/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/ssd_mobilenet_v1/quantization/ptq">qdq</a></td>
</tr>
<tr>
<td>SSD MobileNet V2</td>
<td>Object Detection</td>
<td>Post-Training Static Quantization</td>
<td><a href="./onnxrt/object_detection/ssd_mobilenet_v2/quantization/ptq">qlinearops</a> / <a href="./onnxrt/object_detection/ssd_mobilenet_v2/quantization/ptq">qdq</a></td>
<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/ssd_mobilenet_v2/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/ssd_mobilenet_v2/quantization/ptq">qdq</a></td>
</tr>
<tr>
<td>SSD MobileNet V1 (ONNX Model Zoo)</td>
<td>Object Detection</td>
<td>Post-Training Static Quantization</td>
<td><a href="./onnxrt/object_detection/onnx_model_zoo/ssd_mobilenet_v1/quantization/ptq">qlinearops</a> / <a href="./onnxrt/object_detection/onnx_model_zoo/ssd_mobilenet_v1/quantization/ptq">qdq</a></td>
<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/onnx_model_zoo/ssd_mobilenet_v1/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/onnx_model_zoo/ssd_mobilenet_v1/quantization/ptq">qdq</a></td>
</tr>
<tr>
<td>DUC</td>
@@ -1031,37 +1031,37 @@ Intel® Neural Compressor validated examples with multiple compression technique
<td>Faster R-CNN</td>
<td>Object Detection</td>
<td>Post-Training Static Quantization</td>
<td><a href="./onnxrt/object_detection/onnx_model_zoo/faster_rcnn/quantization/ptq">qlinearops</a> / <a href="./onnxrt/object_detection/onnx_model_zoo/faster_rcnn/quantization/ptq">qdq</a></td>
<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/onnx_model_zoo/faster_rcnn/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/onnx_model_zoo/faster_rcnn/quantization/ptq">qdq</a></td>
</tr>
<tr>
<td>Mask R-CNN</td>
<td>Object Detection</td>
<td>Post-Training Static Quantization</td>
<td><a href="./onnxrt/object_detection/onnx_model_zoo/mask_rcnn/quantization/ptq">qlinearops</a> / <a href="./onnxrt/object_detection/onnx_model_zoo/mask_rcnn/quantization/ptq">qdq</a></td>
<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/onnx_model_zoo/mask_rcnn/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/onnx_model_zoo/mask_rcnn/quantization/ptq">qdq</a></td>
</tr>
<tr>
<td>SSD</td>
<td>Object Detection</td>
<td>Post-Training Static Quantization</td>
<td><a href="./onnxrt/object_detection/onnx_model_zoo/ssd/quantization/ptq">qlinearops</a> / <a href="./onnxrt/object_detection/onnx_model_zoo/ssd/quantization/ptq">qdq</a></td>
<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/onnx_model_zoo/ssd/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/onnx_model_zoo/ssd/quantization/ptq">qdq</a></td>
</tr>
<tr>
<td>Tiny YOLOv3</td>
<td>Object Detection</td>
<td>Post-Training Static Quantization</td>
<td><a href="./onnxrt/object_detection/onnx_model_zoo/tiny_yolov3/quantization/ptq">qlinearops</a></td>
<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/onnx_model_zoo/tiny_yolov3/quantization/ptq">qlinearops</a></td>
</tr>
<tr>
<td>YOLOv3</td>
<td>Object Detection</td>
<td>Post-Training Static Quantization</td>
<td><a href="./onnxrt/object_detection/onnx_model_zoo/yolov3/quantization/ptq">qlinearops</a></td>
<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/onnx_model_zoo/yolov3/quantization/ptq">qlinearops</a></td>
</tr>
<tr>
<td>YOLOv4</td>
<td>Object Detection</td>
<td>Post-Training Static Quantization</td>
<td><a href="./onnxrt/object_detection/onnx_model_zoo/yolov4/quantization/ptq">qlinearops</a></td>
<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/object_detection/onnx_model_zoo/yolov4/quantization/ptq">qlinearops</a></td>
</tr>
<tr>
<td>Emotion FERPlus</td>
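Across this table, the integerops / qlinearops / qdq labels distinguish the quantization output formats the examples produce. In the 2.x API these examples now target, that choice is typically expressed through `PostTrainingQuantConfig`; the mapping below is an assumption based on the public config interface, not something stated in this diff:

```python
from neural_compressor.config import PostTrainingQuantConfig

# integerops examples: dynamic quantization
integerops_conf = PostTrainingQuantConfig(approach="dynamic")

# qlinearops examples: static quantization with the default (QLinear/QOperator) output format
qlinearops_conf = PostTrainingQuantConfig(approach="static")

# qdq examples: static quantization emitting QDQ node pairs
qdq_conf = PostTrainingQuantConfig(approach="static", quant_format="QDQ")
```

This also lines up with the "QOperator" term added to inc_dict.txt above.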