From 6da22f1943510e83f65142f6ca167af160ad6b36 Mon Sep 17 00:00:00 2001
From: ophelielacroix
Date: Mon, 15 Nov 2021 15:30:08 +0100
Subject: [PATCH] clean

---
 danlp/models/bert_models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/danlp/models/bert_models.py b/danlp/models/bert_models.py
index 3712ca9..2123e4b 100644
--- a/danlp/models/bert_models.py
+++ b/danlp/models/bert_models.py
@@ -597,7 +597,7 @@ def _get_pred(self, tokenizer, model, max_length, sentence):
                                        max_length=max_length, truncation=True,
                                        return_overflowing_tokens=True)
         if 'overflowing_tokens' in input1 and input1['overflowing_tokens'].shape[1]>0:
             warnings.warn('Maximum length for sequence exceeded, truncation may result in unexpected results. Consider running the model on a shorter sequence than {} tokens'.format(max_length))
-        pred = model(input1['input_ids'])[0] #, token_type_ids=input1['token_type_ids'])[0]
+        pred = model(input1['input_ids'])[0]
 
         return pred
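
Note (not part of the patch): the hunk above removes a leftover comment from the line that runs the model, while keeping the overflow check that warns when the input is longer than max_length. A minimal sketch of that pattern is shown below for reference; the checkpoint name, the sequence-classification head, and the example sentence are placeholders for illustration, not the DaNLP fine-tuned models.

import warnings

import torch
from transformers import BertTokenizer, BertForSequenceClassification

# Placeholder checkpoint; DaNLP loads its own fine-tuned Danish BERT weights.
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased')
model = BertForSequenceClassification.from_pretrained('bert-base-multilingual-cased')

max_length = 512
sentence = 'En meget lang saetning ' * 300  # deliberately longer than max_length

# Tokenize with truncation and ask for the tokens that were cut off.
inputs = tokenizer.encode_plus(sentence,
                               add_special_tokens=True,
                               return_tensors='pt',
                               max_length=max_length,
                               truncation=True,
                               return_overflowing_tokens=True)

# 'overflowing_tokens' is only non-empty when the sequence exceeded max_length,
# which is the condition the patched _get_pred warns about.
if 'overflowing_tokens' in inputs and inputs['overflowing_tokens'].shape[1] > 0:
    warnings.warn('Sequence longer than {} tokens was truncated; '
                  'results past the cutoff may be unreliable.'.format(max_length))

with torch.no_grad():
    # As in the patched line, only input_ids are passed; token_type_ids are omitted.
    pred = model(inputs['input_ids'])[0]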