diff --git a/keras_nlp/models/whisper/whisper_backbone.py b/keras_nlp/models/whisper/whisper_backbone.py
index 83b9324010..fcb3530252 100644
--- a/keras_nlp/models/whisper/whisper_backbone.py
+++ b/keras_nlp/models/whisper/whisper_backbone.py
@@ -76,7 +76,7 @@ class WhisperBackbone(Backbone):
     Examples:
     ```python
     input_data = {
-        "encoder_token_ids": tf.ones(shape=(1, 12, 80), dtype=tf.int64),
+        "encoder_features": tf.ones(shape=(1, 12, 80), dtype=tf.int64),
         "decoder_token_ids": tf.ones(shape=(1, 12), dtype=tf.int64),
         "decoder_padding_mask": tf.constant(
             [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], shape=(1, 12)
diff --git a/keras_nlp/tests/doc_tests/docstring_test.py b/keras_nlp/tests/doc_tests/docstring_test.py
index 12d31e8ce7..cd5d6b61ec 100644
--- a/keras_nlp/tests/doc_tests/docstring_test.py
+++ b/keras_nlp/tests/doc_tests/docstring_test.py
@@ -108,15 +108,16 @@ def test_fenced_docstrings():
             "keras_nlp.models.backbone",
             "keras_nlp.models.preprocessor",
             "keras_nlp.models.task",
+            "keras_nlp.tokenizers.byte_pair_tokenizer",
+            "keras_nlp.tokenizers.sentence_piece_tokenizer",
+            "keras_nlp.tokenizers.word_piece_tokenizer",
+            # Preprocessors and tokenizers which use `model.spm` (temporary).
             "keras_nlp.models.xlm_roberta.xlm_roberta_preprocessor",
             "keras_nlp.models.f_net.f_net_preprocessor",
             "keras_nlp.models.f_net.f_net_tokenizer",
-            # Preprocessors and tokenizers which use `model.spm` (temporary).
             "keras_nlp.models.albert.albert_preprocessor",
             "keras_nlp.models.albert.albert_tokenizer",
-            "keras_nlp.tokenizers.byte_pair_tokenizer",
-            "keras_nlp.tokenizers.sentence_piece_tokenizer",
-            "keras_nlp.tokenizers.word_piece_tokenizer",
+            "keras_nlp.models.t5.t5_tokenizer",
         ]:
             continue
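
For reference, a minimal sketch of how the corrected docstring example would run end to end with the renamed `"encoder_features"` key. The `WhisperBackbone` constructor arguments below are illustrative assumptions for a small randomly initialized model, not values taken from this diff:

```python
import tensorflow as tf

from keras_nlp.models import WhisperBackbone

# Inputs matching the corrected docstring: the audio input is a batch of
# log-mel spectrogram features, so the key is "encoder_features" rather
# than "encoder_token_ids". Integer ones are used for brevity, as in the
# docstring; real spectrogram features would be floats.
input_data = {
    "encoder_features": tf.ones(shape=(1, 12, 80), dtype=tf.int64),
    "decoder_token_ids": tf.ones(shape=(1, 12), dtype=tf.int64),
    "decoder_padding_mask": tf.constant(
        [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], shape=(1, 12)
    ),
}

# Small randomly initialized backbone. These sizes are assumptions chosen
# to accept the (1, 12, 80) feature shape above, not part of the diff.
model = WhisperBackbone(
    vocabulary_size=51864,
    num_layers=2,
    num_heads=2,
    hidden_dim=128,
    intermediate_dim=256,
    num_mels=80,
    max_encoder_sequence_length=128,
    max_decoder_sequence_length=128,
)
outputs = model(input_data)
```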