diff --git a/docs/_posts/gadde5300/2024-05-15-legner_lener_base_pt.md b/docs/_posts/gadde5300/2024-05-15-legner_lener_base_pt.md
index 2052d83397..c0393a0287 100644
--- a/docs/_posts/gadde5300/2024-05-15-legner_lener_base_pt.md
+++ b/docs/_posts/gadde5300/2024-05-15-legner_lener_base_pt.md
@@ -64,7 +64,7 @@ tokenizer = nlp.Tokenizer()\
 .setInputCols("sentence")\
 .setOutputCol("token")
 
-tokenClassifier = legal.BertForTokenClassification.load("legner_lener_base","pt", "legal/models")\
+tokenClassifier = legal.BertForTokenClassification.pretrained("legner_lener_base","pt", "legal/models")\
 .setInputCols("token", "sentence")\
 .setOutputCol("label")\
 .setCaseSensitive(True)
@@ -226,4 +226,4 @@ result = pipeline.fit(example).transform(example)
 
 ## References
 
-Original texts available in https://paperswithcode.com/sota?task=Token+Classification&dataset=lener_br and in-house data augmentation with weak labelling
\ No newline at end of file
+Original texts available in https://paperswithcode.com/sota?task=Token+Classification&dataset=lener_br and in-house data augmentation with weak labelling
diff --git a/docs/_posts/gadde5300/2024-05-15-legner_lener_large_pt.md b/docs/_posts/gadde5300/2024-05-15-legner_lener_large_pt.md
index b97377c13c..b9e57da371 100644
--- a/docs/_posts/gadde5300/2024-05-15-legner_lener_large_pt.md
+++ b/docs/_posts/gadde5300/2024-05-15-legner_lener_large_pt.md
@@ -64,7 +64,7 @@ tokenizer = nlp.Tokenizer()\
 .setInputCols("sentence")\
 .setOutputCol("token")
 
-tokenClassifier = legal.BertForTokenClassification.load("legner_lener_large","pt", "legal/models")\
+tokenClassifier = legal.BertForTokenClassification.pretrained("legner_lener_large","pt", "legal/models")\
 .setInputCols("token", "sentence")\
 .setOutputCol("label")\
 .setCaseSensitive(True)
@@ -226,4 +226,4 @@ result = pipeline.fit(example).transform(example)
 
 ## References
 
-Original texts available in https://paperswithcode.com/sota?task=Token+Classification&dataset=lener_br and in-house data augmentation with weak labelling
\ No newline at end of file
+Original texts available in https://paperswithcode.com/sota?task=Token+Classification&dataset=lener_br and in-house data augmentation with weak labelling
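
For context, a minimal sketch of the distinction the diff relies on, assuming a licensed johnsnowlabs installation with access to Spark NLP for Legal models (the session start and the local path below are illustrative, not part of the posts): `pretrained(name, lang, remote_loc)` resolves the named model from the Models Hub, while `load()` is the generic Spark ML reader that expects a single path to an already-saved model, so the three-argument `load()` call removed here would not fetch the hub model.

```python
# Illustrative sketch only; assumes a licensed johnsnowlabs install and that the
# license is picked up from the environment when starting the session.
from johnsnowlabs import nlp, legal

spark = nlp.start()

# pretrained(name, lang, remote_loc) downloads the named model from the Models Hub.
tokenClassifier = legal.BertForTokenClassification.pretrained("legner_lener_base", "pt", "legal/models") \
    .setInputCols("token", "sentence") \
    .setOutputCol("label") \
    .setCaseSensitive(True)

# By contrast, load() takes a single path to a model already saved on disk/HDFS,
# e.g. (hypothetical path):
# tokenClassifier = legal.BertForTokenClassification.load("/path/to/saved_model")
```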