[Llava] + CIs fix red cis and llava integration tests (#30440)
* nit

* nit and fmt skip

* fixup

* Update src/transformers/convert_slow_tokenizer.py

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

* set to true

---------

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
ArthurZucker and amyeroberts committed Apr 24, 2024
1 parent 767e351 commit 9a4a119
Showing 2 changed files with 5 additions and 13 deletions.
4 changes: 2 additions & 2 deletions src/transformers/convert_slow_tokenizer.py
@@ -46,7 +46,7 @@ def import_protobuf(error_message=""):
 def _get_prepend_scheme(add_prefix_space: bool, original_tokenizer) -> str:
     if add_prefix_space:
         prepend_scheme = "always"
-        if hasattr(original_tokenizer, "legacy") and not original_tokenizer.legacy:
+        if not getattr(original_tokenizer, "legacy", True):
             prepend_scheme = "first"
     else:
         prepend_scheme = "never"
@@ -1393,7 +1393,7 @@ def tokenizer(self, proto):
         return tokenizer
 
     def normalizer(self, proto):
-        if self.original_tokenizer.legacy:
+        if getattr(self.original_tokenizer, "legacy", True):
             sequence = []
             if getattr(self.original_tokenizer, "add_prefix_space"):
                 sequence += [normalizers.Prepend(prepend="▁")]
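Side note (not part of the commit): both hunks move from direct attribute access to getattr with an explicit True default, so a tokenizer that never defines a legacy attribute is treated as a legacy one. A minimal sketch of the behavior, using hypothetical class names:

    # Illustration only, not part of the commit: the getattr form folds the old
    # hasattr-plus-access check into one call with an explicit default.
    class TokenizerWithFlag:
        legacy = False

    class TokenizerWithoutFlag:  # defines no legacy attribute at all
        pass

    for tok in (TokenizerWithFlag(), TokenizerWithoutFlag()):
        old_style = hasattr(tok, "legacy") and not tok.legacy
        new_style = not getattr(tok, "legacy", True)
        assert old_style == new_style  # same result whether the attribute is absent, True, or False
        print(new_style)  # prints True for the first class, False for the second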
14 changes: 3 additions & 11 deletions tests/models/llava/test_modeling_llava.py
@@ -591,14 +591,6 @@ def test_tokenizer_integration(self):
         fast_tokenizer.add_tokens("<image>", True)
 
         prompt = "<|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|><|im_start|>assistant\n"
-        # If the token is added as special, it's not normalized, and the only diff is the extra space after special tokens.
-        # https://github.com/huggingface/transformers/pull/28881 is the fix for this.
-        self.assertEqual(
-            slow_tokenizer.tokenize(prompt),
-            ['<|im_start|>', 'system', '\n', 'Answer', '▁the', '▁questions', '.', '<|im_end|>', '<|im_start|>', 'user', '\n', '<image>', '\n', 'What', '▁is', '▁shown', '▁in', '▁this', '▁image', '?', '<|im_end|>', '<|im_start|>', 'ass', 'istant', '\n']
-        )  # fmt: skip
-
-        self.assertEqual(
-            fast_tokenizer.tokenize(prompt),
-            ['<|im_start|>', '▁system', '\n', 'Answer', '▁the', '▁questions', '.', '<|im_end|>', '<|im_start|>', '▁user', '\n', '<image>', '▁', '\n', 'What', '▁is', '▁shown', '▁in', '▁this', '▁image', '?', '<|im_end|>', '<|im_start|>', '▁assistant', '\n']
-        )  # fmt: skip
+        EXPECTED_OUTPUT = ['<|im_start|>', 'system', '\n', 'Answer', '▁the', '▁questions', '.', '<|im_end|>', '<|im_start|>', 'user', '\n', '<image>', '\n', 'What', '▁is', '▁shown', '▁in', '▁this', '▁image', '?', '<|im_end|>', '<|im_start|>', 'ass', 'istant', '\n']  # fmt: skip
+        self.assertEqual(slow_tokenizer.tokenize(prompt), EXPECTED_OUTPUT)
+        self.assertEqual(fast_tokenizer.tokenize(prompt), EXPECTED_OUTPUT)
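For context, the consolidated test asserts that the slow and fast Llava tokenizers now tokenize the prompt identically, instead of carrying two diverging expected lists. A rough reproduction sketch of that parity check (the checkpoint name and prompt here are assumptions for illustration, not taken from the test):

    # Hypothetical parity check, not part of the commit.
    from transformers import AutoTokenizer

    checkpoint = "llava-hf/llava-1.5-7b-hf"  # assumed checkpoint for illustration
    slow_tokenizer = AutoTokenizer.from_pretrained(checkpoint, use_fast=False)
    fast_tokenizer = AutoTokenizer.from_pretrained(checkpoint, use_fast=True)

    prompt = "<|im_start|>system\nAnswer the questions.<|im_end|>"
    assert slow_tokenizer.tokenize(prompt) == fast_tokenizer.tokenize(prompt)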
