
Fix framework name in DataLoader API and refine dataloader & metric doc (#1048)

Signed-off-by: yuwenzho <yuwen.zhou@intel.com>
yuwenzho committed Jul 3, 2023
1 parent f2fec43 commit 447cc7f
Showing 22 changed files with 31 additions and 29 deletions.
1 change: 1 addition & 0 deletions .azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt
@@ -2676,3 +2676,4 @@ fdb
jJA
wWLes
xHKe
+PR
7 changes: 4 additions & 3 deletions docs/source/dataloader.md
@@ -5,9 +5,9 @@ DataLoader

2. [Supported Framework Dataloader Matrix](#supported-framework-dataloader-matrix)

-3. [Get Started with Dataloader](#get-start-with-dataloader)
+3. [Get Started with Dataloader](#get-started-with-dataloader)

-3.1 [Use Intel® Neural Compressor DataLoader API](#use-intel®-neural-compressor-dataloader-api)
+3.1 [Use Intel® Neural Compressor DataLoader API](#use-intel-neural-compressor-dataloader-api)

3.2 [Build Custom Dataloader with Python API](#build-custom-dataloader-with-python-api)

@@ -44,7 +44,7 @@ Of cause, users can also use frameworks own dataloader in Neural Compressor.
Acceptable parameters for `DataLoader` API including:
| Parameter | Description |
|:--------------|:----------|
-|framework (str)| different frameworks, such as `tensorflow`, `keras`, `mxnet`, `pytorch` and `onnxrt`.|
+|framework (str)| different frameworks, such as `tensorflow`, `tensorflow_itex`, `keras`, `mxnet`, `pytorch` and `onnxruntime`.|
|dataset (object)| A dataset object from which to get data. Dataset must implement __iter__ or __getitem__ method.|
|batch_size (int, optional)| How many samples per batch to load. Defaults to 1.|
|collate_fn (Callable, optional)| Callable function that processes the batch you want to return from your dataloader. Defaults to None.|
@@ -66,6 +66,7 @@ dataloader = DataLoader(framework='tensorflow', dataset=dataset)
config = PostTrainingQuantConfig()
q_model = quantization.fit(model, config, calib_dataloader=dataloader, eval_func=eval)
```
+> Note: `DataLoader(framework='onnxruntime', dataset=dataset)` failed in neural-compressor v2.2. We have fixed it in this [PR](https://github.com/intel/neural-compressor/pull/1048).
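
The table and the snippet above are enough to build a calibration loader for an ONNX Runtime model as well. Below is a minimal sketch, not part of this patch: the `DummyDataset` class, the tensor shape, and the `neural_compressor.data` import path are illustrative assumptions; per the table, any object implementing `__getitem__` or `__iter__` can serve as the dataset.

```python
# Hypothetical sketch of the DataLoader API with the 'onnxruntime' framework name.
# DummyDataset, its shape, and the constant label are made up for illustration.
import numpy as np
from neural_compressor.data import DataLoader  # assumed import path

class DummyDataset:
    """Yields (input, label) pairs of random data; any __getitem__/__iter__ object works."""
    def __init__(self, length=10, shape=(3, 224, 224)):
        self.samples = [np.random.rand(*shape).astype(np.float32) for _ in range(length)]

    def __getitem__(self, index):
        return self.samples[index], 0

    def __len__(self):
        return len(self.samples)

# With this commit, 'onnxruntime' is accepted directly as the framework string.
dataloader = DataLoader(framework='onnxruntime', dataset=DummyDataset(), batch_size=1)
```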
### Build Custom Dataloader with Python API
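
The body of this section is collapsed in this view, so the following is only a rough, hypothetical illustration of the idea rather than the repository's own example: a custom dataloader here is essentially an iterable that yields `(input, label)` pairs and, by convention in these docs, exposes a `batch_size` attribute; treat the snippet as an assumption-laden sketch, not the documented example.

```python
# Hypothetical custom dataloader sketch; names, shapes, and the batch_size
# attribute convention are assumptions, not code from this repository.
import numpy as np

class NewDataloader:
    def __init__(self, batch_size=1, length=8, shape=(3, 224, 224)):
        self.batch_size = batch_size
        self.dataset = [(np.random.rand(*shape).astype(np.float32), 0)
                        for _ in range(length)]

    def __iter__(self):
        # Yield one (input, label) pair per iteration.
        for data, label in self.dataset:
            yield data, label
```

An instance of such a class can presumably be passed as `calib_dataloader` to `quantization.fit`, in the same way as the built-in `DataLoader` shown earlier.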

6 changes: 3 additions & 3 deletions docs/source/metric.md
@@ -11,9 +11,9 @@ Metrics

2.4. [ONNXRT](#onnxrt)

-3. [Get Started with Metric](#get-start-with-metric)
+3. [Get Started with Metric](#get-started-with-metric)

-3.1. [Use Intel® Neural Compressor Metric API](#use-intel®-neural-compressor-metric-api)
+3.1. [Use Intel® Neural Compressor Metric API](#use-intel-neural-compressor-metric-api)

3.2. [Build Custom Metric with Python API](#build-custom-metric-with-python-api)

@@ -105,7 +105,7 @@ q_model = fit(model, config, calib_dataloader=calib_dataloader, eval_dataloader=

### Build Custom Metric with Python API

-Please refer to [Metrics code](../neural_compressor/metric), users can also register their own metric as follows:
+Please refer to [Metrics code](../../neural_compressor/metric), users can also register their own metric as follows:

```python
class NewMetric(object):
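    # --- Continuation sketch, not part of this commit: the rest of the class body
    # --- is collapsed in this view. A registered metric generally exposes
    # --- update/reset/result; the accuracy-style logic below is illustrative only.
    def __init__(self):
        self.correct = 0
        self.total = 0

    def update(self, preds, labels):
        # Accumulate statistics for one batch of predictions and references.
        for pred, label in zip(preds, labels):
            self.total += 1
            if pred == label:
                self.correct += 1

    def reset(self):
        # Clear accumulated state between evaluation runs.
        self.correct = 0
        self.total = 0

    def result(self):
        # Return the scalar the tuner compares against the accuracy criterion.
        return self.correct / self.total if self.total else 0.0
```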
2 changes: 1 addition & 1 deletion examples/onnxrt/nlp/bert/quantization/ptq_dynamic/main.py
@@ -352,7 +352,7 @@ def result(self):
task=args.task,
model_type=args.model_type,
dynamic_length=args.dynamic_length)
-dataloader = DataLoader(framework='onnxrt', dataset=dataset, batch_size=args.batch_size)
+dataloader = DataLoader(framework='onnxruntime', dataset=dataset, batch_size=args.batch_size)
metric = ONNXRTGLUE(args.task)

def eval_func(model):
2 changes: 1 addition & 1 deletion examples/onnxrt/nlp/bert/quantization/ptq_static/main.py
@@ -359,7 +359,7 @@ def result(self):
task=args.task,
model_type=args.model_type,
dynamic_length=args.dynamic_length)
-dataloader = DataLoader(framework='onnxrt', dataset=dataset, batch_size=args.batch_size)
+dataloader = DataLoader(framework='onnxruntime', dataset=dataset, batch_size=args.batch_size)
metric = ONNXRTGLUE(args.task)

def eval_func(model):
@@ -345,7 +345,7 @@ def result(self):
task=args.task,
model_type=args.model_type,
dynamic_length=args.dynamic_length)
-dataloader = DataLoader(framework='onnxrt', dataset=dataset, batch_size=args.batch_size)
+dataloader = DataLoader(framework='onnxruntime', dataset=dataset, batch_size=args.batch_size)
metric = ONNXRTGLUE(args.task)

def eval_func(model):
@@ -352,7 +352,7 @@ def result(self):
task=args.task,
model_type=args.model_type,
dynamic_length=args.dynamic_length)
-dataloader = DataLoader(framework='onnxrt', dataset=dataset, batch_size=args.batch_size)
+dataloader = DataLoader(framework='onnxruntime', dataset=dataset, batch_size=args.batch_size)
metric = ONNXRTGLUE(args.task)

def eval_func(model):
@@ -493,7 +493,7 @@ def eval_func(model, *args):
conf = BenchmarkConfig(iteration=100,
cores_per_instance=28,
num_of_instance=1)
-b_dataloader = DataLoader(framework='onnxrt', dataset=b_dataset, batch_size=model_args.batch_size)
+b_dataloader = DataLoader(framework='onnxruntime', dataset=b_dataset, batch_size=model_args.batch_size)
fit(model, conf, b_dataloader=b_dataloader)
elif model_args.mode == 'accuracy':
eval_f1 = eval_func(model)
@@ -499,7 +499,7 @@ def eval_func(model, *args):
q_model = quantization.fit(model,
config,
eval_func=eval_func,
-calib_dataloader=DataLoader(framework='onnxrt',
+calib_dataloader=DataLoader(framework='onnxruntime',
dataset=calib_dataset,
batch_size=model_args.batch_size)
)
@@ -514,7 +514,7 @@ def eval_func(model, *args):
conf = BenchmarkConfig(iteration=100,
cores_per_instance=28,
num_of_instance=1)
-b_dataloader = DataLoader(framework='onnxrt', dataset=b_dataset, batch_size=model_args.batch_size)
+b_dataloader = DataLoader(framework='onnxruntime', dataset=b_dataset, batch_size=model_args.batch_size)
fit(model, conf, b_dataloader=b_dataloader)
elif model_args.mode == 'accuracy':
eval_f1 = eval_func(model)
@@ -347,7 +347,7 @@ def result(self):
data_dir=args.data_path,
model_name_or_path=args.model_name_or_path,
task=args.task)
-dataloader = DataLoader(framework='onnxrt', dataset=dataset, batch_size=args.batch_size)
+dataloader = DataLoader(framework='onnxruntime', dataset=dataset, batch_size=args.batch_size)
metric = ONNXRTGLUE(args.task)

def eval_func(model, *args):
@@ -354,7 +354,7 @@ def result(self):
data_dir=args.data_path,
model_name_or_path=args.model_name_or_path,
task=args.task)
-dataloader = DataLoader(framework='onnxrt', dataset=dataset, batch_size=args.batch_size)
+dataloader = DataLoader(framework='onnxruntime', dataset=dataset, batch_size=args.batch_size)
metric = ONNXRTGLUE(args.task)

def eval_func(model, *args):
@@ -455,7 +455,7 @@ def eval_func(model):
conf = BenchmarkConfig(iteration=100,
cores_per_instance=28,
num_of_instance=1)
-b_dataloader = DataLoader(framework='onnxrt', dataset=b_dataset, batch_size=model_args.batch_size)
+b_dataloader = DataLoader(framework='onnxruntime', dataset=b_dataset, batch_size=model_args.batch_size)
fit(onnx_model, conf, b_dataloader=b_dataloader)
elif model_args.mode == 'accuracy':
eval_f1 = eval_func(onnx_model)
@@ -456,7 +456,7 @@ def eval_func(model):
q_model = quantization.fit(onnx_model,
config,
eval_func=eval_func,
-calib_dataloader=DataLoader(framework='onnxrt',
+calib_dataloader=DataLoader(framework='onnxruntime',
dataset=calib_dataset,
batch_size=1))
q_model.save(model_args.save_path)
@@ -469,7 +469,7 @@ def eval_func(model):
conf = BenchmarkConfig(iteration=100,
cores_per_instance=28,
num_of_instance=1)
-b_dataloader = DataLoader(framework='onnxrt', dataset=b_dataset, batch_size=model_args.batch_size)
+b_dataloader = DataLoader(framework='onnxruntime', dataset=b_dataset, batch_size=model_args.batch_size)
fit(onnx_model, conf, b_dataloader=b_dataloader)
elif model_args.mode == 'accuracy':
eval_f1 = eval_func(onnx_model)
@@ -490,7 +490,7 @@ def eval_func(model):
conf = BenchmarkConfig(iteration=100,
cores_per_instance=28,
num_of_instance=1)
-b_dataloader = DataLoader(framework='onnxrt', dataset=b_dataset, batch_size=model_args.batch_size)
+b_dataloader = DataLoader(framework='onnxruntime', dataset=b_dataset, batch_size=model_args.batch_size)
fit(onnx_model, conf, b_dataloader=b_dataloader)
elif model_args.mode == 'accuracy':
eval_f1 = eval_func(onnx_model)
@@ -488,7 +488,7 @@ def eval_func(model):
q_model = quantization.fit(onnx_model,
config,
eval_func=eval_func,
-calib_dataloader=DataLoader(framework='onnxrt',
+calib_dataloader=DataLoader(framework='onnxruntime',
dataset=calib_dataset,
batch_size=1))
q_model.save(model_args.save_path)
@@ -502,7 +502,7 @@ def eval_func(model):
conf = BenchmarkConfig(iteration=100,
cores_per_instance=28,
num_of_instance=1,)
-b_dataloader = DataLoader(framework='onnxrt', dataset=b_dataset, batch_size=model_args.batch_size)
+b_dataloader = DataLoader(framework='onnxruntime', dataset=b_dataset, batch_size=model_args.batch_size)
fit(onnx_model, conf, b_dataloader=b_dataloader)
elif model_args.mode == 'accuracy':
eval_f1 = eval_func(onnx_model)
@@ -352,7 +352,7 @@ def result(self):
task=args.task,
model_type=args.model_type,
dynamic_length=args.dynamic_length)
-dataloader = DataLoader(framework='onnxrt', dataset=dataset, batch_size=args.batch_size)
+dataloader = DataLoader(framework='onnxruntime', dataset=dataset, batch_size=args.batch_size)
metric = ONNXRTGLUE(args.task)

def eval_func(model):
@@ -359,7 +359,7 @@ def result(self):
task=args.task,
model_type=args.model_type,
dynamic_length=args.dynamic_length)
-dataloader = DataLoader(framework='onnxrt', dataset=dataset, batch_size=args.batch_size)
+dataloader = DataLoader(framework='onnxruntime', dataset=dataset, batch_size=args.batch_size)
metric = ONNXRTGLUE(args.task)

def eval_func(model):
@@ -243,7 +243,7 @@ def eval_func(model):
conf = BenchmarkConfig(iteration=100,
cores_per_instance=4,
num_of_instance=1)
-b_dataloader = DataLoader(framework='onnxrt', dataset=ds, batch_size=args.eval_batch_size)
+b_dataloader = DataLoader(framework='onnxruntime', dataset=ds, batch_size=args.eval_batch_size)
fit(model, conf, b_dataloader=b_dataloader)
else:
evaluate(args, model, tokenizer)
@@ -352,7 +352,7 @@ def result(self):
task=args.task,
model_type=args.model_type,
dynamic_length=args.dynamic_length)
-dataloader = DataLoader(framework='onnxrt', dataset=dataset, batch_size=args.batch_size)
+dataloader = DataLoader(framework='onnxruntime', dataset=dataset, batch_size=args.batch_size)
metric = ONNXRTGLUE(args.task)

def eval_func(model):
@@ -359,7 +359,7 @@ def result(self):
task=args.task,
model_type=args.model_type,
dynamic_length=args.dynamic_length)
-dataloader = DataLoader(framework='onnxrt', dataset=dataset, batch_size=args.batch_size)
+dataloader = DataLoader(framework='onnxruntime', dataset=dataset, batch_size=args.batch_size)
metric = ONNXRTGLUE(args.task)

def eval_func(model):
4 changes: 2 additions & 2 deletions neural_compressor/data/dataloaders/dataloader.py
@@ -28,7 +28,7 @@
"pytorch": PyTorchDataLoader,
"pytorch_ipex": PyTorchDataLoader,
"pytorch_fx": PyTorchDataLoader,
"onnxrt": ONNXRTDataLoader,
"onnxruntime": ONNXRTDataLoader,
"onnxrt_qlinearops": ONNXRTDataLoader,
"onnxrt_integerops": ONNXRTDataLoader,
"onnxrt_qdq": ONNXRTDataLoader,
@@ -67,7 +67,7 @@ def __new__(cls, framework, dataset, batch_size=1, collate_fn=None,
Defaults to False.
"""
assert framework in ('tensorflow', 'tensorflow_itex', 'keras',\
-'pytorch', 'pytorch_ipex', 'pytorch_fx', 'onnxrt', 'onnxrt_qdqops', \
+'pytorch', 'pytorch_ipex', 'pytorch_fx', 'onnxruntime', 'onnxrt_qdqops', \
'onnxrt_qlinearops', 'onnxrt_integerops', 'mxnet'), \
"framework support tensorflow pytorch mxnet onnxruntime"
return DATALOADERS[framework](dataset=dataset,
2 changes: 1 addition & 1 deletion test/adaptor/onnxrt_adaptor/test_adaptor_onnxrt.py
@@ -1416,7 +1416,7 @@ def test_query_block_info(self):
self.assertEqual(len(q_capability['block_wise']), 6)

def test_dataloader_input(self):
-cv_dataloader = DataLoader(framework='onnxrt', dataset=DummyCVDataset_list(shape=(3, 224, 224)))
+cv_dataloader = DataLoader(framework='onnxruntime', dataset=DummyCVDataset_list(shape=(3, 224, 224)))
quantizer = Quantization('qlinear.yaml')
quantizer.calib_dataloader = cv_dataloader
quantizer.eval_dataloader = cv_dataloader
