keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py (10 changes: 9 additions & 1 deletion)

@@ -123,10 +123,18 @@ def id_to_token(self, id):

     def token_to_id(self, token):
         """Convert a string token to an integer id."""
 
         if token in self._vocabulary_prefix:
             return self._vocabulary_prefix.index(token)
-        return int(self._sentence_piece.string_to_id(token).numpy()) + 1
+
+        spm_token_id = self._sentence_piece.string_to_id(token)
+
+        # OOV token: map SentencePiece's <unk> result to unk_token_id.
+        spm_unk_token_id = self._sentence_piece.string_to_id("<unk>")
+        if spm_token_id == spm_unk_token_id:
+            return self.unk_token_id
+
+        return int(spm_token_id.numpy()) + 1
 
     def tokenize(self, inputs):
         tokens = super().tokenize(inputs)
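For reference, a minimal usage sketch of the new behavior (the proto path "test.spm" and the exact in-vocabulary id are illustrative assumptions, not taken from this PR):

from keras_nlp.models import XLMRobertaTokenizer

# "test.spm" is a hypothetical SentencePiece proto file.
tokenizer = XLMRobertaTokenizer(proto="test.spm")

# In-vocabulary tokens still resolve through SentencePiece, offset by 1
# to account for the shifted special tokens.
tokenizer.token_to_id("▁the")  # e.g. 4, depends on the vocabulary

# Before this fix, an OOV token returned SentencePiece's shifted <unk> id,
# which could collide with another special token's id; it now maps to the
# tokenizer's own unk_token_id (3 for XLM-RoBERTa).
tokenizer.token_to_id("<oov-token>")  # 3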
keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py (4 changes: 4 additions & 0 deletions)

@@ -96,6 +96,10 @@ def test_id_to_token(self):
     def test_token_to_id(self):
         self.assertEqual(self.tokenizer.token_to_id("▁the"), 4)
         self.assertEqual(self.tokenizer.token_to_id("▁round"), 10)
+        # Test any random OOV token.
+        self.assertEqual(self.tokenizer.token_to_id("<oov-token>"), 3)
+        # Test a special token.
+        self.assertEqual(self.tokenizer.token_to_id("<pad>"), 1)
 
     def test_serialization(self):
         config = keras.utils.serialize_keras_object(self.tokenizer)
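The expected values in the new assertions follow from the fixed-id special tokens that XLM-RoBERTa places ahead of the shifted SentencePiece vocabulary; a minimal sketch of that assumption (the prefix list mirrors the tokenizer source, reproduced here only for illustration):

# token_to_id returns the list index for any special token in the prefix.
vocabulary_prefix = ["<s>", "<pad>", "</s>", "<unk>"]

assert vocabulary_prefix.index("<pad>") == 1  # the <pad> assertion above
assert vocabulary_prefix.index("<unk>") == 3  # OOV tokens now share this id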