diff --git a/keras_nlp/models/f_net/f_net_tokenizer.py b/keras_nlp/models/f_net/f_net_tokenizer.py
index ed1e1cf011..9a19330f12 100644
--- a/keras_nlp/models/f_net/f_net_tokenizer.py
+++ b/keras_nlp/models/f_net/f_net_tokenizer.py
@@ -15,15 +15,11 @@
 """FNet tokenizer."""
 
 import copy
-import os
-
-from tensorflow import keras
 
 from keras_nlp.api_export import keras_nlp_export
 from keras_nlp.models.f_net.f_net_presets import backbone_presets
 from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer
 from keras_nlp.utils.python_utils import classproperty
-from keras_nlp.utils.python_utils import format_docstring
 
 
 @keras_nlp_export("keras_nlp.models.FNetTokenizer")
@@ -92,52 +88,3 @@ def __init__(self, proto, **kwargs):
     @classproperty
     def presets(cls):
         return copy.deepcopy(backbone_presets)
-
-    @classmethod
-    @format_docstring(names=", ".join(backbone_presets))
-    def from_preset(
-        cls,
-        preset,
-        **kwargs,
-    ):
-        """Instantiate an FNet tokenizer from preset vocabulary.
-
-        Args:
-            preset: string. Must be one of {{names}}.
-
-        Examples:
-        ```python
-        # Load a preset tokenizer.
-        tokenizer = keras_nlp.models.FNetTokenizer.from_preset(
-            "f_net_base_en",
-        )
-
-        # Tokenize some input.
-        tokenizer("The quick brown fox tripped.")
-
-        # Detokenize some input.
-        tokenizer.detokenize([5, 6, 7, 8, 9])
-        ```
-        """
-        if preset not in cls.presets:
-            raise ValueError(
-                "`preset` must be one of "
-                f"""{", ".join(cls.presets)}. Received: {preset}."""
-            )
-        metadata = cls.presets[preset]
-
-        spm_proto = keras.utils.get_file(
-            "vocab.spm",
-            metadata["spm_proto_url"],
-            cache_subdir=os.path.join("models", preset),
-            file_hash=metadata["spm_proto_hash"],
-        )
-
-        config = metadata["preprocessor_config"]
-        config.update(
-            {
-                "proto": spm_proto,
-            },
-        )
-
-        return cls.from_config({**config, **kwargs})
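Note for reviewers: this deletes FNet's hand-rolled `from_preset` override (and the `os`/`keras`/`format_docstring` imports it needed), while keeping the `presets` classproperty. Presumably the tokenizer now inherits a generic `from_preset` from its base class, keyed off that classproperty. A minimal sketch of the call sites that should keep working, taken from the removed docstring and assuming the inherited classmethod keeps the same signature:

```python
import keras_nlp

# Load a preset tokenizer. `from_preset` is assumed to be provided by
# the shared tokenizer base class rather than defined on FNetTokenizer.
tokenizer = keras_nlp.models.FNetTokenizer.from_preset("f_net_base_en")

# Tokenize some input.
tokenizer("The quick brown fox tripped.")

# Detokenize some input.
tokenizer.detokenize([5, 6, 7, 8, 9])
```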