Remove deprecated argument from tests and examples (#1072)
* Remove deprecated argument from tests and examples

* fix style
echarlaix committed May 26, 2023
1 parent 369f44e commit 4c2389a
Showing 17 changed files with 64 additions and 73 deletions.
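
The rename is mechanical throughout the commit: every `from_pretrained(..., from_transformers=True)` call becomes `from_pretrained(..., export=True)`. A minimal sketch of the new spelling, using an illustrative checkpoint rather than one taken from these files:

from optimum.onnxruntime import ORTModelForSequenceClassification

# Old, deprecated spelling removed by this commit:
#   model = ORTModelForSequenceClassification.from_pretrained(
#       "distilbert-base-uncased-finetuned-sst-2-english", from_transformers=True
#   )

# New spelling: export the transformers checkpoint to ONNX at load time.
model = ORTModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased-finetuned-sst-2-english", export=True
)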
@@ -248,7 +248,7 @@ def main():
     )
 
     # Export the model
-    model = ORTModelForMultipleChoice.from_pretrained(model_args.model_name_or_path, from_transformers=True)
+    model = ORTModelForMultipleChoice.from_pretrained(model_args.model_name_or_path, export=True)
 
     # Create the optimizer
     optimizer = ORTOptimizer.from_pretrained(model)
@@ -317,7 +317,7 @@ def main():
     )
 
     # Export the model
-    model = ORTModelForQuestionAnswering.from_pretrained(model_args.model_name_or_path, from_transformers=True)
+    model = ORTModelForQuestionAnswering.from_pretrained(model_args.model_name_or_path, export=True)
 
     # Create the optimizer
     optimizer = ORTOptimizer.from_pretrained(model)
@@ -262,7 +262,7 @@ def main():
     )
 
     # Export the model
-    model = ORTModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, from_transformers=True)
+    model = ORTModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, export=True)
 
     # Create the optimizer
     optimizer = ORTOptimizer.from_pretrained(model)
@@ -288,7 +288,7 @@ def main():
     )
 
     # Export the model
-    model = ORTModelForTokenClassification.from_pretrained(model_args.model_name_or_path, from_transformers=True)
+    model = ORTModelForTokenClassification.from_pretrained(model_args.model_name_or_path, export=True)
 
     # Create the optimizer
     optimizer = ORTOptimizer.from_pretrained(model)
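
The four optimization examples above all continue the same way after the export step. A hedged sketch of the typical follow-up, with an illustrative checkpoint, save directory, and `OptimizationConfig` not taken from these scripts:

from optimum.onnxruntime import ORTModelForSequenceClassification, ORTOptimizer
from optimum.onnxruntime.configuration import OptimizationConfig

model = ORTModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased-finetuned-sst-2-english", export=True
)
optimizer = ORTOptimizer.from_pretrained(model)

# Level 1 applies basic, hardware-independent graph fusions.
optimization_config = OptimizationConfig(optimization_level=1)
optimizer.optimize(save_dir="onnx_optimized", optimization_config=optimization_config)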
@@ -293,7 +293,7 @@ def compute_metrics(p: EvalPrediction):
         return result
 
     # Export the model
-    model = ORTModelForImageClassification.from_pretrained(model_args.model_name_or_path, from_transformers=True)
+    model = ORTModelForImageClassification.from_pretrained(model_args.model_name_or_path, export=True)
 
     # Create the quantizer
     quantizer = ORTQuantizer.from_pretrained(model)
@@ -336,7 +336,7 @@ def compute_metrics(eval_predictions):
     )
 
     # Export the model
-    model = ORTModelForMultipleChoice.from_pretrained(model_args.model_name_or_path, from_transformers=True)
+    model = ORTModelForMultipleChoice.from_pretrained(model_args.model_name_or_path, export=True)
 
     # Create the quantizer
     quantizer = ORTQuantizer.from_pretrained(model)
@@ -537,7 +537,7 @@ def compute_metrics(p: EvalPrediction):
     tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
 
     # Export the model
-    model = ORTModelForQuestionAnswering.from_pretrained(model_args.model_name_or_path, from_transformers=True)
+    model = ORTModelForQuestionAnswering.from_pretrained(model_args.model_name_or_path, export=True)
 
     # Create the quantizer
     quantizer = ORTQuantizer.from_pretrained(model)
@@ -392,7 +392,7 @@ def compute_metrics(p: EvalPrediction):
     tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
 
     # Export the model
-    model = ORTModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, from_transformers=True)
+    model = ORTModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, export=True)
 
     # Create the quantizer
     quantizer = ORTQuantizer.from_pretrained(model)
@@ -480,7 +480,7 @@ def compute_metrics(p):
     tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
 
     # Export the model
-    model = ORTModelForTokenClassification.from_pretrained(model_args.model_name_or_path, from_transformers=True)
+    model = ORTModelForTokenClassification.from_pretrained(model_args.model_name_or_path, export=True)
 
     # Create the quantizer
     quantizer = ORTQuantizer.from_pretrained(model)
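
The five quantization examples follow the same shape. A hedged sketch of how the resulting `ORTQuantizer` is usually driven, assuming dynamic quantization and an illustrative checkpoint and save directory:

from optimum.onnxruntime import ORTModelForSequenceClassification, ORTQuantizer
from optimum.onnxruntime.configuration import AutoQuantizationConfig

model = ORTModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased-finetuned-sst-2-english", export=True
)
quantizer = ORTQuantizer.from_pretrained(model)

# Dynamic quantization requires no calibration dataset.
dqconfig = AutoQuantizationConfig.avx512_vnni(is_static=False, per_channel=False)
quantizer.quantize(save_dir="onnx_quantized", quantization_config=dqconfig)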
4 changes: 2 additions & 2 deletions optimum/onnxruntime/modeling_ort.py
@@ -637,7 +637,7 @@ def from_pretrained(
             use_merged (`Optional[bool]`, defaults to `None`):
                 whether or not to use a single ONNX that handles both the decoding without and with past key values reuse. This option defaults
-                to `True` if loading from a local repository and a merged decoder is found. When exporting with `from_transformers=True`,
+                to `True` if loading from a local repository and a merged decoder is found. When exporting with `export=True`,
                 defaults to `False`. This option should be set to `True` to minimize memory usage.
         Returns:
@@ -1391,7 +1391,7 @@ def forward(
         >>> from optimum.onnxruntime import {model_class}
         >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
-        >>> model = {model_class}.from_pretrained("{checkpoint}", from_transformers=True)
+        >>> model = {model_class}.from_pretrained("{checkpoint}", export=True)
         >>> num_choices = 4
         >>> first_sentence = ["Members of the procession walk down the street holding small horn brass instruments."] * num_choices
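
The `use_merged` behavior documented above pairs with the renamed flag. A hedged sketch, with "gpt2" as an illustrative checkpoint:

from optimum.onnxruntime import ORTModelForCausalLM

# With export=True, use_merged defaults to False: separate ONNX graphs are
# produced for the without/with past-key-values decoding cases.
model = ORTModelForCausalLM.from_pretrained("gpt2", export=True, use_merged=False)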
4 changes: 2 additions & 2 deletions optimum/onnxruntime/modeling_seq2seq.py
@@ -248,7 +248,7 @@
         >>> processor = {processor_class}.from_pretrained("{checkpoint}")
         >>> tokenizer = {tokenizer_class}.from_pretrained("{checkpoint}")
-        >>> model = {model_class}.from_pretrained("{checkpoint}", from_transformers=True)
+        >>> model = {model_class}.from_pretrained("{checkpoint}", export=True)
         >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
         >>> image = Image.open(requests.get(url, stream=True).raw)
@@ -270,7 +270,7 @@
         >>> processor = {processor_class}.from_pretrained("{checkpoint}")
         >>> tokenizer = {tokenizer_class}.from_pretrained("{checkpoint}")
-        >>> model = {model_class}.from_pretrained("{checkpoint}", from_transformers=True)
+        >>> model = {model_class}.from_pretrained("{checkpoint}", export=True)
         >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
         >>> image = Image.open(requests.get(url, stream=True).raw)
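
Both docstring examples stop after loading the image. A hedged sketch of how such a vision-to-text model is typically driven end to end, with `nlpconnect/vit-gpt2-image-captioning` standing in for the templated `{checkpoint}` and `AutoImageProcessor` for `{processor_class}`:

import requests
from PIL import Image
from transformers import AutoImageProcessor, AutoTokenizer
from optimum.onnxruntime import ORTModelForVision2Seq

checkpoint = "nlpconnect/vit-gpt2-image-captioning"  # illustrative stand-in
processor = AutoImageProcessor.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = ORTModelForVision2Seq.from_pretrained(checkpoint, export=True)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Encode the image, generate token ids, decode to text.
pixel_values = processor(image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])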
2 changes: 1 addition & 1 deletion optimum/onnxruntime/optimization.py
@@ -102,7 +102,7 @@ def from_pretrained(
             raise NotImplementedError(
                 "ORTOptimizer does not support ORTModelForCausalLM models that use a single ONNX for both the without/with past cases."
                 " Please pass an ORTModelForCausalLM that uses a separate ONNX for each without/with past cases. This can be done"
-                " by using `ORTModelForCausalLM.from_pretrained(..., from_transformers=True, use_merged=False)`, or by"
+                " by using `ORTModelForCausalLM.from_pretrained(..., export=True, use_merged=False)`, or by"
                 " using the option `--no-post-process` in the optimum-cli ONNX export tool."
             )
         onnx_model_path.append(model_or_path.decoder_model_path)
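
The updated error message spells out its own workaround. A hedged sketch, again with "gpt2" as an illustrative checkpoint:

from optimum.onnxruntime import ORTModelForCausalLM, ORTOptimizer

# use_merged=False exports separate without/with-past ONNX graphs,
# which is the layout ORTOptimizer accepts.
model = ORTModelForCausalLM.from_pretrained("gpt2", export=True, use_merged=False)
optimizer = ORTOptimizer.from_pretrained(model)  # no NotImplementedError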
2 changes: 1 addition & 1 deletion optimum/onnxruntime/runs/__init__.py
@@ -36,7 +36,7 @@ def __init__(self, run_config):
         )
 
         onnx_model = ORT_SUPPORTED_TASKS[self.task]["class"][0].from_pretrained(
-            run_config["model_name_or_path"], from_transformers=True
+            run_config["model_name_or_path"], export=True
         )
 
         trfs_model = FeaturesManager.get_model_from_feature(
6 changes: 3 additions & 3 deletions optimum/pipelines/pipelines_base.py
@@ -229,7 +229,7 @@ def load_ort_pipeline(
 
     if model is None:
         model_id = SUPPORTED_TASKS[targeted_task]["default"]
-        model = SUPPORTED_TASKS[targeted_task]["class"][0].from_pretrained(model_id, from_transformers=True)
+        model = SUPPORTED_TASKS[targeted_task]["class"][0].from_pretrained(model_id, export=True)
     elif isinstance(model, str):
         from ..onnxruntime.modeling_seq2seq import ENCODER_ONNX_FILE_PATTERN, ORTModelForConditionalGeneration
@@ -249,8 +249,8 @@ def load_ort_pipeline(
             use_auth_token=use_auth_token,
             revision=revision,
         )
-        from_transformers = len(onnx_files) == 0
-        model = ort_model_class.from_pretrained(model, from_transformers=from_transformers, **model_kwargs)
+        export = len(onnx_files) == 0
+        model = ort_model_class.from_pretrained(model, export=export, **model_kwargs)
     elif isinstance(model, ORTModel):
         if tokenizer is None and load_tokenizer:
             for preprocessor in model.preprocessors:
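
`load_ort_pipeline` is the loader behind `optimum.pipelines.pipeline` with the ONNX Runtime accelerator. A hedged usage sketch; the task string and input are illustrative:

from optimum.pipelines import pipeline

# With model=None, the task's default checkpoint is exported on the fly,
# i.e. the export=True branch in the diff above.
classifier = pipeline("text-classification", accelerator="ort")
print(classifier("ONNX Runtime speeds up inference."))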