diff --git a/keras_nlp/models/f_net/f_net_backbone.py b/keras_nlp/models/f_net/f_net_backbone.py
index 938ce61e5b..5fa1e8533a 100644
--- a/keras_nlp/models/f_net/f_net_backbone.py
+++ b/keras_nlp/models/f_net/f_net_backbone.py
@@ -37,16 +37,16 @@ def f_net_bias_initializer(stddev=0.02):
 
 @keras_nlp_export("keras_nlp.models.FNetBackbone")
 class FNetBackbone(Backbone):
-    """FNet encoder network.
+    """An FNet encoder network.
 
     This class implements a bi-directional Fourier Transform-based encoder as
    described in ["FNet: Mixing Tokens with Fourier Transforms"](https://arxiv.org/abs/2105.03824).
     It includes the embedding lookups and `keras_nlp.layers.FNetEncoder` layers,
     but not the masked language model or next sentence prediction heads.
 
-    The default constructor gives a fully customizable, randomly initialized FNet
-    encoder with any number of layers and embedding dimensions. To load
-    preset architectures and weights, use the `from_preset` constructor.
+    The default constructor gives a fully customizable, randomly initialized
+    FNet encoder with any number of layers and embedding dimensions. To
+    load preset architectures and weights, use the `from_preset()` constructor.
 
     Note: unlike other models, FNet does not take in a `"padding_mask"` input,
     the `"<pad>"` token is handled equivalently to all other tokens in the input
@@ -78,15 +78,19 @@ class FNetBackbone(Backbone):
         ),
     }
 
-    # Randomly initialized FNet encoder with a custom config
+    # Pretrained FNet encoder.
+    model = keras_nlp.models.FNetBackbone.from_preset("f_net_base_en")
+    model(input_data)
+
+    # Randomly initialized FNet encoder with a custom config.
     model = keras_nlp.models.FNetBackbone(
         vocabulary_size=32000,
-        num_layers=12,
-        hidden_dim=768,
-        intermediate_dim=3072,
-        max_sequence_length=12,
+        num_layers=4,
+        hidden_dim=256,
+        intermediate_dim=512,
+        max_sequence_length=128,
     )
-    output = model(input_data)
+    model(input_data)
     ```
     """
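Note, not part of the patch: the revised docstring stresses that FNet takes no `"padding_mask"` input. Below is a minimal runnable sketch of the custom-config backbone call for reviewers; the output key names (`"sequence_output"`, `"pooled_output"`) follow the usual keras_nlp backbone convention and are an assumption here.

```python
import tensorflow as tf
import keras_nlp

# FNet consumes only token and segment ids; there is no "padding_mask" key.
input_data = {
    "token_ids": tf.ones(shape=(1, 12), dtype=tf.int64),
    "segment_ids": tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]]),
}

# Randomly initialized encoder with the docstring's custom config.
model = keras_nlp.models.FNetBackbone(
    vocabulary_size=32000,
    num_layers=4,
    hidden_dim=256,
    intermediate_dim=512,
    max_sequence_length=128,
)
outputs = model(input_data)
# Assumed output keys: outputs["sequence_output"] -> (1, 12, 256),
# outputs["pooled_output"] -> (1, 256).
```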
diff --git a/keras_nlp/models/f_net/f_net_classifier.py b/keras_nlp/models/f_net/f_net_classifier.py
index 2eaac26ec7..6efea16208 100644
--- a/keras_nlp/models/f_net/f_net_classifier.py
+++ b/keras_nlp/models/f_net/f_net_classifier.py
@@ -33,9 +33,9 @@ class FNetClassifier(Task):
     """An end-to-end f_net model for classification tasks.
 
     This model attaches a classification head to a
-    `keras_nlp.model.FNetBackbone` model, mapping from the backbone
-    outputs to logit output suitable for a classification task. For usage of
-    this model with pre-trained weights, see the `from_preset()` method.
+    `keras_nlp.models.FNetBackbone` instance, mapping from the backbone outputs
+    to logits suitable for a classification task. For usage of this model with
+    pre-trained weights, use the `from_preset()` constructor.
 
     This model can optionally be configured with a `preprocessor` layer, in
     which case it will automatically apply preprocessing to raw inputs during
@@ -55,41 +55,50 @@ class FNetClassifier(Task):
         `None`, this model will not apply preprocessing, and inputs should be
         preprocessed before calling the model.
 
-    Example usage:
+    Examples:
+
+    Raw string data.
+    ```python
+    features = ["The quick brown fox jumped.", "I forgot my homework."]
+    labels = [0, 3]
+
+    # Pretrained classifier.
+    classifier = keras_nlp.models.FNetClassifier.from_preset(
+        "f_net_base_en",
+        num_classes=4,
+    )
+    classifier.fit(x=features, y=labels, batch_size=2)
+    classifier.predict(x=features, batch_size=2)
+
+    # Re-compile (e.g., with a new learning rate).
+    classifier.compile(
+        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+        optimizer=keras.optimizers.Adam(5e-5),
+        jit_compile=True,
+    )
+    # Access backbone programmatically (e.g., to change `trainable`).
+    classifier.backbone.trainable = False
+    # Fit again.
+    classifier.fit(x=features, y=labels, batch_size=2)
+    ```
+
+    Preprocessed integer data.
     ```python
-    preprocessed_features = {
+    features = {
         "token_ids": tf.ones(shape=(2, 12), dtype=tf.int64),
         "segment_ids": tf.constant(
             [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]] * 2, shape=(2, 12)
         ),
-        "padding_mask": tf.constant(
-            [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 2, shape=(2, 12)
-        ),
     }
     labels = [0, 3]
 
-    # Randomly initialize a FNet backbone.
-    backbone = keras_nlp.models.FNetBackbone(
-        vocabulary_size=32000,
-        num_layers=12,
-        hidden_dim=768,
-        intermediate_dim=3072,
-        max_sequence_length=12,
-    )
-
-    # Create a FNet classifier and fit your data.
-    classifier = keras_nlp.models.FNetClassifier(
-        backbone,
+    # Pretrained classifier without preprocessing.
+    classifier = keras_nlp.models.FNetClassifier.from_preset(
+        "f_net_base_en",
         num_classes=4,
         preprocessor=None,
     )
-    classifier.compile(
-        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
-    )
-    classifier.fit(x=preprocessed_features, y=labels, batch_size=2)
-
-    # Access backbone programatically (e.g., to change `trainable`)
-    classifier.backbone.trainable = False
+    classifier.fit(x=features, y=labels, batch_size=2)
     ```
     """
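Note, not part of the patch: because the classifier now ships with its preprocessor attached, raw strings can also be streamed through `tf.data`. A sketch for reviewers; the dataset wiring and batch size are illustrative.

```python
import tensorflow as tf
import keras_nlp

features = ["The quick brown fox jumped.", "I forgot my homework."]
labels = [0, 3]

classifier = keras_nlp.models.FNetClassifier.from_preset(
    "f_net_base_en",
    num_classes=4,
)

# The attached preprocessor tokenizes and packs inside the input pipeline.
ds = tf.data.Dataset.from_tensor_slices((features, labels)).batch(2)
classifier.fit(ds)
```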
diff --git a/keras_nlp/models/f_net/f_net_masked_lm.py b/keras_nlp/models/f_net/f_net_masked_lm.py
index 67bc50f74d..884cedec8e 100644
--- a/keras_nlp/models/f_net/f_net_masked_lm.py
+++ b/keras_nlp/models/f_net/f_net_masked_lm.py
@@ -35,7 +35,7 @@ class FNetMaskedLM(Task):
     This model will train FNet on a masked language modeling task.
     The model will predict labels for a number of masked tokens in
     the input data. For usage of this model with pre-trained weights, see the
-    `from_preset()` method.
+    `from_preset()` constructor.
 
     This model can optionally be configured with a `preprocessor` layer, in
     which case inputs can be raw string features during `fit()`, `predict()`,
@@ -54,26 +54,33 @@ class FNetMaskedLM(Task):
 
     Example usage:
 
-    Raw string inputs and pretrained backbone.
+    Raw string data.
     ```python
-    # Create a dataset with raw string features. Labels are inferred.
     features = ["The quick brown fox jumped.", "I forgot my homework."]
 
-    # Create a FNetMaskedLM with a pretrained backbone and further train
-    # on an MLM task.
+    # Pretrained language model.
     masked_lm = keras_nlp.models.FNetMaskedLM.from_preset(
         "f_net_base_en",
     )
+    masked_lm.fit(x=features, batch_size=2)
+
+    # Re-compile (e.g., with a new learning rate).
     masked_lm.compile(
         loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+        optimizer=keras.optimizers.Adam(5e-5),
+        jit_compile=True,
     )
+    # Access backbone programmatically (e.g., to change `trainable`).
+    masked_lm.backbone.trainable = False
+    # Fit again.
     masked_lm.fit(x=features, batch_size=2)
     ```
 
-    Preprocessed inputs and custom backbone.
+    Preprocessed integer data.
    ```python
     # Create a preprocessed dataset where 0 is the mask token.
-    preprocessed_features = {
+    features = {
         "token_ids": tf.constant(
             [[1, 2, 0, 4, 0, 6, 7, 8]] * 2, shape=(2, 8)
         ),
@@ -85,23 +92,11 @@ class FNetMaskedLM(Task):
     # Labels are the original masked values.
     labels = [[3, 5]] * 2
 
-    # Randomly initialize a FNet encoder
-    backbone = keras_nlp.models.FNetBackbone(
-        vocabulary_size=50265,
-        num_layers=12,
-        hidden_dim=768,
-        intermediate_dim=3072,
-        max_sequence_length=12
-    )
-    # Create a FNet masked_lm and fit the data.
-    masked_lm = keras_nlp.models.FNetMaskedLM(
-        backbone,
+    masked_lm = keras_nlp.models.FNetMaskedLM.from_preset(
+        "f_net_base_en",
         preprocessor=None,
     )
-    masked_lm.compile(
-        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
-    )
-    masked_lm.fit(x=preprocessed_features, y=labels, batch_size=2)
+    masked_lm.fit(x=features, y=labels, batch_size=2)
     ```
     """
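Note, not part of the patch: a sketch of inference with the preprocessed format shown above. The `"segment_ids"` and `"mask_positions"` keys are elided by the hunk context here and are assumed to match the masked LM preprocessor's output.

```python
import tensorflow as tf
import keras_nlp

# Batch where 0 is the mask token, mirroring the docstring example.
features = {
    "token_ids": tf.constant([[1, 2, 0, 4, 0, 6, 7, 8]] * 2, shape=(2, 8)),
    "segment_ids": tf.constant([[0, 0, 0, 0, 0, 1, 1, 1]] * 2, shape=(2, 8)),
    "mask_positions": tf.constant([[2, 4]] * 2, shape=(2, 2)),
}

masked_lm = keras_nlp.models.FNetMaskedLM.from_preset(
    "f_net_base_en",
    preprocessor=None,
)
# Logits of shape (batch_size, num_mask_positions, vocab_size):
# one vocabulary distribution per masked position.
preds = masked_lm.predict(features)
```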
diff --git a/keras_nlp/models/f_net/f_net_masked_lm_preprocessor.py b/keras_nlp/models/f_net/f_net_masked_lm_preprocessor.py
index ee4cccefbf..3aac7e13f9 100644
--- a/keras_nlp/models/f_net/f_net_masked_lm_preprocessor.py
+++ b/keras_nlp/models/f_net/f_net_masked_lm_preprocessor.py
@@ -28,14 +28,14 @@ class FNetMaskedLMPreprocessor(FNetPreprocessor):
     `keras_nlp.models.FNetMaskedLM` task model. Preprocessing will occur in
     multiple steps.
 
-    - Tokenize any number of input segments using the `tokenizer`.
-    - Pack the inputs together with the appropriate `"[CLS]"`, `"[SEP]"` and
+    1. Tokenize any number of input segments using the `tokenizer`.
+    2. Pack the inputs together with the appropriate `"[CLS]"`, `"[SEP]"` and
       `"<pad>"` tokens, i.e., adding a single `"[CLS]"` at the start of the
       entire sequence, `"[SEP]"` between each segment, and a `"[SEP]"` at the
       end of the entire sequence.
-    - Randomly select non-special tokens to mask, controlled by
+    3. Randomly select non-special tokens to mask, controlled by
       `mask_selection_rate`.
-    - Construct a `(x, y, sample_weight)` tuple suitable for training with a
+    4. Construct a `(x, y, sample_weight)` tuple suitable for training with a
       `keras_nlp.models.FNetMaskedLM` task model.
 
 Args:
@@ -66,6 +66,8 @@ class FNetMaskedLMPreprocessor(FNetPreprocessor):
         out of budget. It supports an arbitrary number of segments.
 
     Examples:
+
+    Directly calling the layer on data.
     ```python
     # Load the preprocessor from a preset.
     preprocessor = keras_nlp.models.FNetMaskedLMPreprocessor.from_preset(
@@ -73,20 +75,29 @@ class FNetMaskedLMPreprocessor(FNetPreprocessor):
     )
 
     # Tokenize and mask a single sentence.
-    sentence = tf.constant("The quick brown fox jumped.")
-    preprocessor(sentence)
+    preprocessor("The quick brown fox jumped.")
 
-    # Tokenize and mask a batch of sentences.
-    sentences = tf.constant(
-        ["The quick brown fox jumped.", "Call me Ishmael."]
-    )
-    preprocessor(sentences)
+    # Tokenize and mask a batch of single sentences.
+    preprocessor(["The quick brown fox jumped.", "Call me Ishmael."])
 
-    # Tokenize and mask a dataset of sentences.
-    features = tf.constant(
-        ["The quick brown fox jumped.", "Call me Ishmael."]
+    # Tokenize and mask sentence pairs.
+    # In this case, always convert input to tensors before calling the layer.
+    first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
+    second = tf.constant(["The fox tripped.", "Oh look, a whale."])
+    preprocessor((first, second))
+    ```
+
+    Mapping with `tf.data.Dataset`.
+    ```python
+    preprocessor = keras_nlp.models.FNetMaskedLMPreprocessor.from_preset(
+        "f_net_base_en"
     )
-    ds = tf.data.Dataset.from_tensor_slices((features))
+
+    first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
+    second = tf.constant(["The fox tripped.", "Oh look, a whale."])
+
+    # Map single sentences.
+    ds = tf.data.Dataset.from_tensor_slices(first)
     ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
 
@@ -94,26 +105,14 @@ class FNetMaskedLMPreprocessor(FNetPreprocessor):
-    # Alternatively, you can create a preprocessor from your own vocabulary.
-    vocab_data = tf.data.Dataset.from_tensor_slices(
-        ["the quick brown fox", "the earth is round"]
-    )
-    # Creating sentencepiece tokenizer for FNet LM preprocessor
-    bytes_io = io.BytesIO()
-    sentencepiece.SentencePieceTrainer.train(
-        sentence_iterator=vocab_data.as_numpy_iterator(),
-        model_writer=bytes_io,
-        vocab_size=12,
-        model_type="WORD",
-        pad_id=0,
-        bos_id=1,
-        eos_id=2,
-        unk_id=3,
-        pad_piece="<pad>",
-        unk_piece="<unk>",
-        bos_piece="[CLS]",
-        eos_piece="[SEP]",
-        user_defined_symbols="[MASK]",
+    # Map sentence pairs.
+    ds = tf.data.Dataset.from_tensor_slices((first, second))
+    # Watch out for tf.data's default unpacking of tuples here!
+    # Best to invoke the `preprocessor` directly in this case.
+    ds = ds.map(
+        lambda first, second: preprocessor(x=(first, second)),
+        num_parallel_calls=tf.data.AUTOTUNE,
     )
-    proto = bytes_io.getvalue()
-    tokenizer = keras_nlp.models.FNetTokenizer(proto=proto)
-    preprocessor = keras_nlp.models.FNetMaskedLMPreprocessor(tokenizer=tokenizer)
     ```
     """
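Note, not part of the patch: step 4 of the list above says the layer builds an `(x, y, sample_weight)` tuple. A quick sketch of what that looks like when the layer is called directly; the descriptions in the comments are assumptions based on the step list, not output of this patch.

```python
import keras_nlp

preprocessor = keras_nlp.models.FNetMaskedLMPreprocessor.from_preset(
    "f_net_base_en"
)

# The layer returns a tuple that can feed `fit()` directly.
x, y, sample_weight = preprocessor("The quick brown fox jumped.")
# x holds the packed inputs with masked token ids and the positions that
# were masked, y holds the original token ids at those positions, and
# sample_weight marks which mask slots are real rather than padding.
```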
diff --git a/keras_nlp/models/f_net/f_net_preprocessor.py b/keras_nlp/models/f_net/f_net_preprocessor.py
index 980d7fa70b..21c6ec7f1b 100644
--- a/keras_nlp/models/f_net/f_net_preprocessor.py
+++ b/keras_nlp/models/f_net/f_net_preprocessor.py
@@ -33,29 +33,16 @@ class FNetPreprocessor(Preprocessor):
 
     This preprocessing layer will do three things:
 
-    - Tokenize any number of input segments using the `tokenizer`.
-    - Pack the inputs together using a `keras_nlp.layers.MultiSegmentPacker`.
+    1. Tokenize any number of input segments using the `tokenizer`.
+    2. Pack the inputs together using a `keras_nlp.layers.MultiSegmentPacker`
       with the appropriate `"[CLS]"`, `"[SEP]"` and `"<pad>"` tokens.
-    - Construct a dictionary with keys `"token_ids"`, and `"segment_ids"` that
+    3. Construct a dictionary with keys `"token_ids"` and `"segment_ids"` that
       can be passed directly to `keras_nlp.models.FNetBackbone`.
 
     This layer can be used directly with `tf.data.Dataset.map` to preprocess
     string data in the `(x, y, sample_weight)` format used by
     `keras.Model.fit`.
 
-    The call method of this layer accepts three arguments, `x`, `y`, and
-    `sample_weight`. `x` can be a python string or tensor representing a single
-    segment, a list of python strings representing a batch of single segments,
-    or a list of tensors representing multiple segments to be packed together.
-    `y` and `sample_weight` are both optional, can have any format, and will be
-    passed through unaltered.
-
-    Special care should be taken when using `tf.data` to map over an unlabeled
-    tuple of string segments. `tf.data.Dataset.map` will unpack this tuple
-    directly into the call arguments of this layer, rather than forward all
-    argument to `x`. To handle this case, it is recommended to explicitly call
-    the layer, e.g. `ds.map(lambda seg1, seg2: preprocessor(x=(seg1, seg2)))`.
-
     Args:
         tokenizer: A `keras_nlp.models.FNetTokenizer` instance.
         sequence_length: The length of the packed inputs.
@@ -70,7 +57,17 @@ class FNetPreprocessor(Preprocessor):
         left-to-right manner and fills up the buckets until we run out of
         budget. It supports an arbitrary number of segments.
 
+    Call arguments:
+        x: A tensor of single string sequences, or a tuple of multiple
+            tensor sequences to be packed together. Inputs may be batched or
+            unbatched. For single sequences, raw python inputs will be converted
+            to tensors. For multiple sequences, pass tensors directly.
+        y: Any label data. Will be passed through unaltered.
+        sample_weight: Any label weight data. Will be passed through unaltered.
+
     Examples:
+
+    Directly calling the layer on data.
     ```python
     tokenizer = keras_nlp.models.FNetTokenizer(proto="model.spm")
     preprocessor = keras_nlp.models.FNetPreprocessor(
@@ -79,61 +76,50 @@ class FNetPreprocessor(Preprocessor):
     )
 
     # Tokenize and pack a single sentence.
-    sentence = tf.constant("The quick brown fox jumped.")
-    preprocessor(sentence)
-    # Same output.
     preprocessor("The quick brown fox jumped.")
 
-    # Tokenize and a batch of single sentences.
-    sentences = tf.constant(
-        ["The quick brown fox jumped.", "Call me Ishmael."]
-    )
-    preprocessor(sentences)
-    # Same output.
-    preprocessor(
-        ["The quick brown fox jumped.", "Call me Ishmael."]
-    )
+    # Tokenize and pack a batch of single sentences.
+    preprocessor(["The quick brown fox jumped.", "Call me Ishmael."])
 
-    # Tokenize and pack a sentence pair.
-    first_sentence = tf.constant("The quick brown fox jumped.")
-    second_sentence = tf.constant("The fox tripped.")
-    preprocessor((first_sentence, second_sentence))
+    # Preprocess a batch of sentence pairs.
+    # When handling multiple sequences, always convert to tensors first!
+    first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
+    second = tf.constant(["The fox tripped.", "Oh look, a whale."])
+    preprocessor((first, second))
 
-    # Map a dataset to preprocess a single sentence.
-    features = tf.constant(
-        ["The quick brown fox jumped.", "Call me Ishmael."]
+    preprocessor = keras_nlp.models.FNetPreprocessor(tokenizer)
+    preprocessor("The quick brown fox jumped.")
+    ```
+
+    Mapping with `tf.data.Dataset`.
+    ```python
+    preprocessor = keras_nlp.models.FNetPreprocessor.from_preset(
+        "f_net_base_en"
     )
-    labels = tf.constant([0, 1])
-    ds = tf.data.Dataset.from_tensor_slices((features, labels))
+    first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
+    second = tf.constant(["The fox tripped.", "Oh look, a whale."])
+    label = tf.constant([1, 1])
+
+    # Map labeled single sentences.
+    ds = tf.data.Dataset.from_tensor_slices((first, label))
     ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
 
-    # Map a dataset to preprocess sentence pairs.
-    first_sentences = tf.constant(
-        ["The quick brown fox jumped.", "Call me Ishmael."]
-    )
-    second_sentences = tf.constant(
-        ["The fox tripped.", "Oh look, a whale."]
-    )
-    labels = tf.constant([1, 1])
-    ds = tf.data.Dataset.from_tensor_slices(
-        (
-            (first_sentences, second_sentences), labels
-        )
-    )
+    # Map unlabeled single sentences.
+    ds = tf.data.Dataset.from_tensor_slices(first)
     ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
 
-    # Map a dataset to preprocess unlabeled sentence pairs.
-    first_sentences = tf.constant(
-        ["The quick brown fox jumped.", "Call me Ishmael."]
-    )
-    second_sentences = tf.constant(
-        ["The fox tripped.", "Oh look, a whale."]
-    )
-    ds = tf.data.Dataset.from_tensor_slices((first_sentences, second_sentences))
+    # Map labeled sentence pairs.
+    ds = tf.data.Dataset.from_tensor_slices(((first, second), label))
+    ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
+
+    # Map unlabeled sentence pairs.
+    ds = tf.data.Dataset.from_tensor_slices((first, second))
+    # Watch out for tf.data's default unpacking of tuples here!
     # Best to invoke the `preprocessor` directly in this case.
     ds = ds.map(
-        lambda s1, s2: preprocessor(x=(s1, s2)),
+        lambda first, second: preprocessor(x=(first, second)),
         num_parallel_calls=tf.data.AUTOTUNE,
     )
     ```
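Note, not part of the patch: step 3 of the list above says the output dictionary can be passed directly to `keras_nlp.models.FNetBackbone`. A sketch chaining the two for reviewers; loading both from the same preset is illustrative.

```python
import keras_nlp

preprocessor = keras_nlp.models.FNetPreprocessor.from_preset("f_net_base_en")
backbone = keras_nlp.models.FNetBackbone.from_preset("f_net_base_en")

# A batch of one raw string in, a dict with "token_ids" and "segment_ids"
# out. Note there is no "padding_mask" key for FNet.
features = preprocessor(["The quick brown fox jumped."])
outputs = backbone(features)
```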
diff --git a/keras_nlp/models/f_net/f_net_tokenizer.py b/keras_nlp/models/f_net/f_net_tokenizer.py
index 9a19330f12..29d5b3913b 100644
--- a/keras_nlp/models/f_net/f_net_tokenizer.py
+++ b/keras_nlp/models/f_net/f_net_tokenizer.py
@@ -49,18 +49,18 @@ class FNetTokenizer(SentencePieceTokenizer):
     for more details on the format.
 
     Examples:
 
     ```python
-    tokenizer = keras_nlp.models.FNetTokenizer(proto="model.spm")
-
-    # Batched inputs.
-    tokenizer(["the quick brown fox", "the earth is round"])
+    # Unbatched input.
+    tokenizer = keras_nlp.models.FNetTokenizer.from_preset(
+        "f_net_base_en",
+    )
+    tokenizer("The quick brown fox jumped.")
 
-    # Unbatched inputs.
-    tokenizer("the quick brown fox")
+    # Batched input.
+    tokenizer(["The quick brown fox jumped.", "The fox slept."])
 
     # Detokenization.
-    tokenizer.detokenize(tf.constant([[2, 14, 2231, 886, 2385, 3]]))
+    tokenizer.detokenize(tokenizer("The quick brown fox jumped."))
     ```
     """
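Note, not part of the patch: for reviewers who want to exercise the tokenizer without the `"f_net_base_en"` preset, the SentencePiece training snippet removed from the masked LM preprocessor docstring above can be reused to build a toy vocabulary with the special pieces FNet expects.

```python
import io

import sentencepiece
import tensorflow as tf
import keras_nlp

# Train a tiny SentencePiece model with FNet's special pieces.
vocab_data = tf.data.Dataset.from_tensor_slices(
    ["the quick brown fox", "the earth is round"]
)
bytes_io = io.BytesIO()
sentencepiece.SentencePieceTrainer.train(
    sentence_iterator=vocab_data.as_numpy_iterator(),
    model_writer=bytes_io,
    vocab_size=12,
    model_type="WORD",
    pad_id=0,
    bos_id=1,
    eos_id=2,
    unk_id=3,
    pad_piece="<pad>",
    unk_piece="<unk>",
    bos_piece="[CLS]",
    eos_piece="[SEP]",
    user_defined_symbols="[MASK]",
)

tokenizer = keras_nlp.models.FNetTokenizer(proto=bytes_io.getvalue())
tokenizer("the quick brown fox")
```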