Fix Qwen2Tokenizer (#29929)
qwen2: fixed tokens starting with # in slow tokenizer; add tests

Co-authored-by: jklj077 <17811943+jklj077@users.noreply.github.com>
jklj077 committed Apr 3, 2024
1 parent 17b06e2 commit 851f253
Showing 2 changed files with 23 additions and 4 deletions.
4 changes: 2 additions & 2 deletions src/transformers/models/qwen2/tokenization_qwen2.py
@@ -177,9 +177,9 @@ def __init__(
         self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
         bpe_merges = []
         with open(merges_file, encoding="utf-8") as merges_handle:
-            for line in merges_handle:
+            for i, line in enumerate(merges_handle):
                 line = line.strip()
-                if not line or line.startswith("#"):
+                if (i == 0 and line.startswith("#version:")) or not line:
                     continue
                 bpe_merges.append(tuple(line.split()))
         self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
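The old check skipped every merges-file line starting with "#". That is fine for the "#version:" header at the top of a GPT-2-style merges.txt, but it also silently drops real merges whose first symbol is "#", which the Qwen2 vocabulary does contain (hence this fix); the new check only skips a "#version:" header on the first line. A minimal sketch of the difference (not part of the commit; the toy merges list is a hypothetical example mirroring the test data added below):

# Contrast the old and new merges parsing on a toy merges list.
toy_merges = [
    "#version: 0.2",  # GPT-2-style header line; the only line that should be skipped
    "\u0120 #",       # merge producing the "\u0120#" token
    "# #",            # merge producing the "##" token; the line starts with "#"
]

def parse_old(lines):
    # pre-fix: every line starting with "#" is dropped, so the "# #" merge is lost
    merges = []
    for line in lines:
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        merges.append(tuple(line.split()))
    return merges

def parse_new(lines):
    # post-fix: only a "#version:" header on the first line is skipped
    merges = []
    for i, line in enumerate(lines):
        line = line.strip()
        if (i == 0 and line.startswith("#version:")) or not line:
            continue
        merges.append(tuple(line.split()))
    return merges

print(parse_old(toy_merges))  # [('Ġ', '#')]               -- the "# #" merge is silently dropped
print(parse_new(toy_merges))  # [('Ġ', '#'), ('#', '#')]   -- both merges are kept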
23 changes: 21 additions & 2 deletions tests/models/qwen2/test_tokenization_qwen2.py
@@ -59,6 +59,8 @@ def setUp(self):
                 ";}",
                 ";}\u010a",
                 "\u00cf\u0135",
+                "\u0120#",
+                "##",
             ]
         )

@@ -75,6 +77,8 @@ def setUp(self):
             "; }",
             ";} \u010a",
             "\u00cf \u0135",
+            "\u0120 #",
+            "# #",
         ]
 
         self.special_tokens_map = {"eos_token": "<|endoftext|>"}
@@ -129,7 +133,7 @@ def test_python_full_tokenizer(self):
         self.assertListEqual(tokens, bpe_tokens)
 
         input_tokens = tokens
-        input_bpe_tokens = [75, 78, 86, 260, 259, 260, 220, 77, 68, 86, 260, 220, 15, 16, 15, 266, 268, 267]
+        input_bpe_tokens = [75, 78, 86, 260, 259, 260, 220, 77, 68, 86, 260, 220, 15, 16, 15, 266, 270, 267]
         self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
 
     @unittest.skip("We disable the test of pretokenization as it is not reversible.")
@@ -139,6 +143,11 @@ def test_pretokenized_inputs(self):
         # the results, by nature, should be different.
         pass
 
+    @unittest.skip("We disable the test of clean up tokenization spaces as it is not applicable.")
+    def test_clean_up_tokenization_spaces(self):
+        # it only tests bert-base-uncased and clean_up_tokenization_spaces is not applicable to this tokenizer
+        pass
+
     def test_nfc_normalization(self):
         # per https://unicode.org/faq/normalization.html, there are three characters whose normalization forms
         # under NFC, NFD, NFKC, and NFKD are all different
@@ -158,6 +167,16 @@ def test_nfc_normalization(self):
             tokenizer_output_string = tokenizer.backend_tokenizer.normalizer.normalize_str(input_string)
             self.assertEqual(tokenizer_output_string, output_string)
 
+    def test_slow_tokenizer_token_with_number_sign(self):
+        if not self.test_slow_tokenizer:
+            return
+
+        sequence = " ###"
+        token_ids = [268, 269]
+
+        tokenizer = self.get_tokenizer()
+        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sequence)), token_ids)
+
     def test_slow_tokenizer_decode_spaces_between_special_tokens_default(self):
         # Qwen2Tokenizer changes the default `spaces_between_special_tokens` in `decode` to False
         if not self.test_slow_tokenizer:
@@ -166,7 +185,7 @@ def test_slow_tokenizer_decode_spaces_between_special_tokens_default(self):
         # tokenizer has a special token: `"<|endfotext|>"` as eos, but it is not `legacy_added_tokens`
         # special tokens in `spaces_between_special_tokens` means spaces between `legacy_added_tokens`
         # that would be `"<|im_start|>"` and `"<|im_end|>"` in Qwen/Qwen2 Models
-        token_ids = [259, 260, 268, 269, 26]
+        token_ids = [259, 260, 270, 271, 26]
         sequence = " lower<|endoftext|><|im_start|>;"
         sequence_with_space = " lower<|endoftext|> <|im_start|> ;"
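A quick way to exercise the fix end to end (a minimal sketch, assuming Hub access and the checkpoint name "Qwen/Qwen1.5-0.5B"; any Qwen2 checkpoint shipping a merges.txt should behave the same):

from transformers import Qwen2Tokenizer

# Qwen2Tokenizer is the slow (pure Python) tokenizer that this commit fixes.
tokenizer = Qwen2Tokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")

text = "def f():  # tokens containing a number sign"
ids = tokenizer(text)["input_ids"]

# Before the fix, merges whose lines begin with "#" were dropped from bpe_ranks,
# so "#"-heavy text could split into more, smaller pieces than intended.
print(tokenizer.tokenize(text))
print(tokenizer.decode(ids) == text)  # expected True: byte-level BPE round-trips the text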
