diff --git a/src/sparseml/transformers/masked_language_modeling.py b/src/sparseml/transformers/masked_language_modeling.py
index fb284d51545..0fe68bea2ce 100644
--- a/src/sparseml/transformers/masked_language_modeling.py
+++ b/src/sparseml/transformers/masked_language_modeling.py
@@ -744,11 +744,6 @@ def compute_metrics(eval_preds):
             num_samples_to_export=data_args.num_export_samples
         )
 
-    if training_args.push_to_hub:
-        trainer.push_to_hub(**kwargs)
-    else:
-        trainer.create_model_card(**kwargs)
-
 
 def _mp_fn(index):
     # For xla_spawn (TPUs)
diff --git a/src/sparseml/transformers/question_answering.py b/src/sparseml/transformers/question_answering.py
index f6b186a285a..b8bc4c51ac6 100644
--- a/src/sparseml/transformers/question_answering.py
+++ b/src/sparseml/transformers/question_answering.py
@@ -844,10 +844,6 @@ def compute_metrics(p: EvalPrediction):
         trainer.save_sample_inputs_outputs(
             num_samples_to_export=data_args.num_export_samples
         )
-    if training_args.push_to_hub:
-        trainer.push_to_hub(**kwargs)
-    else:
-        trainer.create_model_card(**kwargs)
 
 
 def _mp_fn(index):
diff --git a/src/sparseml/transformers/text_classification.py b/src/sparseml/transformers/text_classification.py
index b49f803d8a1..eb5cff773eb 100644
--- a/src/sparseml/transformers/text_classification.py
+++ b/src/sparseml/transformers/text_classification.py
@@ -873,11 +873,6 @@ def compute_metrics(p: EvalPrediction):
             num_samples_to_export=data_args.num_export_samples
         )
 
-    if training_args.push_to_hub:
-        trainer.push_to_hub(**kwargs)
-    else:
-        trainer.create_model_card(**kwargs)
-
 
 def _split_train_val(train_dataset, val_ratio):
     # Fixed random seed to make split consistent across runs with the same ratio
diff --git a/src/sparseml/transformers/token_classification.py b/src/sparseml/transformers/token_classification.py
index 884cfc3e27b..2d1755d0c1f 100644
--- a/src/sparseml/transformers/token_classification.py
+++ b/src/sparseml/transformers/token_classification.py
@@ -729,11 +729,6 @@ def compute_metrics(p):
             num_samples_to_export=data_args.num_export_samples
         )
 
-    if training_args.push_to_hub:
-        trainer.push_to_hub(**kwargs)
-    else:
-        trainer.create_model_card(**kwargs)
-
 
 def _mp_fn(index):
     # For xla_spawn (TPUs)