From 2737ec479421fb6f758bf1445afc049150c613fa Mon Sep 17 00:00:00 2001
From: dbogunowicz <97082108+dbogunowicz@users.noreply.github.com>
Date: Wed, 22 Nov 2023 16:14:50 +0100
Subject: [PATCH 1/3] "AttributeError: 'torch._C.Value' object has no
 attribute 'float'" sparseml.transformers.export_onnx for
 zoo:bert-base_cased-squad_wikipedia_bookcorpus-pruned80.4block_quantized
 (#1847)

---
 src/sparseml/pytorch/utils/exporter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/sparseml/pytorch/utils/exporter.py b/src/sparseml/pytorch/utils/exporter.py
index 4d7e44b51ac..d54875a98ea 100644
--- a/src/sparseml/pytorch/utils/exporter.py
+++ b/src/sparseml/pytorch/utils/exporter.py
@@ -460,7 +460,7 @@ def export_onnx(
         https://pytorch.org/docs/stable/onnx.html
     """
     if _PARSED_TORCH_VERSION >= version.parse("1.10.0") and opset < 13 and convert_qat:
-        warnings.warn(
+        raise ValueError(
             "Exporting onnx with QAT and opset < 13 may result in errors. "
             "Please use opset>=13 with QAT. "
             "See https://github.com/pytorch/pytorch/issues/77455 for more info. "

From b0209e786fa3bde1a041a280e47b157df0a03e1a Mon Sep 17 00:00:00 2001
From: Damian
Date: Tue, 28 Nov 2023 09:20:15 +0000
Subject: [PATCH 2/3] initial commit

---
 src/sparseml/transformers/export.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/src/sparseml/transformers/export.py b/src/sparseml/transformers/export.py
index 0ef3f82c7d8..8b6cc08dedd 100644
--- a/src/sparseml/transformers/export.py
+++ b/src/sparseml/transformers/export.py
@@ -297,10 +297,20 @@ def export_transformer_to_onnx(
     )

     if sequence_length is None:
+        if hasattr(config, "max_position_embeddings"):
+            sequence_length = config.max_position_embeddings
+        elif hasattr(config, "max_seq_len"):
+            sequence_length = config.max_seq_len
+        else:
+            raise ValueError(
+                "Could not infer a default sequence length "
+                "from the HF transformers config. Please specify "
+                "a sequence length with --sequence_length"
+            )
         _LOGGER.info(
-            f"Using default sequence length of {config.max_position_embeddings}"
+            f"Using default sequence length of {sequence_length} "
+            "(inferred from HF transformers config) "
         )
-        sequence_length = config.max_position_embeddings

     tokenizer = AutoTokenizer.from_pretrained(
         model_path, model_max_length=sequence_length

From 4a1ca9517ce4ce06b574f0a57b77df380752efc3 Mon Sep 17 00:00:00 2001
From: dbogunowicz <97082108+dbogunowicz@users.noreply.github.com>
Date: Tue, 28 Nov 2023 10:29:07 +0100
Subject: [PATCH 3/3] Update src/sparseml/transformers/export.py

---
 src/sparseml/transformers/export.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/sparseml/transformers/export.py b/src/sparseml/transformers/export.py
index 8b6cc08dedd..370dfe4234b 100644
--- a/src/sparseml/transformers/export.py
+++ b/src/sparseml/transformers/export.py
@@ -305,7 +305,7 @@ def export_transformer_to_onnx(
             raise ValueError(
                 "Could not infer a default sequence length "
                 "from the HF transformers config. Please specify "
-                "a sequence length with --sequence_length"
+                "the sequence length with --sequence_length"
             )
             _LOGGER.info(
                 f"Using default sequence length of {sequence_length} "