train_nusabert_tokenizer.py
from dataclasses import dataclass

from datasets import load_dataset, concatenate_datasets
from transformers import AutoTokenizer


@dataclass
class Args:
    vocab_size: int = 10_000
    limit_alphabet: int = 100
    min_frequency: int = 4
    base_model_name: str = "indobenchmark/indobert-base-p1"
    hf_repo_id: str = "LazarusNLP/nusabert-base"


def main(args: Args):
    all_datasets = []

    # IndoWiki
    wiki_langs = ["ace", "ban", "bjn", "bug", "gor", "id", "jv", "map_bms", "min", "ms", "nia", "su", "tet"]
    wiki = load_dataset(
        "sabilmakbar/indo_wiki", "indowiki_dedup_all", split="+".join(wiki_langs), trust_remote_code=True
    )
    wiki = wiki.remove_columns([col for col in wiki.column_names if col != "text"])
    all_datasets.append(wiki)

    # KoPI-NLLB
    kopi_langs = [
        "ace_Latn-neardup",
        "ban_Latn-neardup",
        "bjn_Latn-neardup",
        "jav_Latn-neardup",
        "min_Latn-neardup",
        "sun_Latn-neardup",
    ]
    for lang in kopi_langs:
        ds = load_dataset("acul3/KoPI-NLLB", lang, split="train", trust_remote_code=True)
        ds = ds.remove_columns([col for col in ds.column_names if col != "text"])
        all_datasets.append(ds)

    # CulturaX
    culturax_langs = ["ms", "jv", "su"]
    for lang in culturax_langs:
        ds = load_dataset("uonlp/CulturaX", lang, split="train", trust_remote_code=True)
        ds = ds.remove_columns([col for col in ds.column_names if col != "text"])
        all_datasets.append(ds)

    # Keep only the "text" column from every source and merge into one corpus
    concatenated_dataset = concatenate_datasets(all_datasets)

    def batch_iterator(batch_size=1000):
        for i in range(0, len(concatenated_dataset), batch_size):
            yield concatenated_dataset[i : i + batch_size]["text"]

    # Train a new tokenizer on the merged corpus, reusing the base model's algorithm
    old_tokenizer = AutoTokenizer.from_pretrained(args.base_model_name, use_fast=True)
    new_tokenizer = old_tokenizer.train_new_from_iterator(
        text_iterator=batch_iterator(),
        vocab_size=args.vocab_size,
        min_frequency=args.min_frequency,
        limit_alphabet=args.limit_alphabet,
        show_progress=True,
    )

    # Tokens learned from the new corpora that are absent from the base vocabulary
    new_tokens = set(new_tokenizer.vocab.keys()) - set(old_tokenizer.vocab.keys())
    print(len(new_tokens))

    # Extend the base tokenizer with the new tokens and push the result to the Hub
    old_tokenizer.add_tokens(list(new_tokens))
    print(len(old_tokenizer))
    old_tokenizer.push_to_hub(args.hf_repo_id, private=True)


if __name__ == "__main__":
    args = Args()
    main(args)
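
# Usage note (a sketch, not part of the original script): once the push succeeds,
# the extended tokenizer can be loaded back the usual way, e.g.
#   tokenizer = AutoTokenizer.from_pretrained("LazarusNLP/nusabert-base")
# assuming the caller is authenticated for the private Hub repo.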