From 4dd8299dff38603469e33b9c8b11f259f50f0985 Mon Sep 17 00:00:00 2001 From: cosmo3769 Date: Tue, 13 Aug 2024 04:31:45 +0530 Subject: [PATCH 1/7] ported mistral --- keras_nlp/src/utils/transformers/convert.py | 10 ++ .../src/utils/transformers/convert_mistral.py | 144 ++++++++++++++++++ 2 files changed, 154 insertions(+) create mode 100644 keras_nlp/src/utils/transformers/convert_mistral.py diff --git a/keras_nlp/src/utils/transformers/convert.py b/keras_nlp/src/utils/transformers/convert.py index e2cded6ce6..447c0fb895 100644 --- a/keras_nlp/src/utils/transformers/convert.py +++ b/keras_nlp/src/utils/transformers/convert.py @@ -30,6 +30,12 @@ from keras_nlp.src.utils.transformers.convert_llama3 import ( load_llama3_tokenizer, ) +from keras_nlp.src.utils.transformers.convert_mistral import ( + load_mistral_backbone, +) +from keras_nlp.src.utils.transformers.convert_mistral import ( + load_mistral_tokenizer, +) from keras_nlp.src.utils.transformers.convert_pali_gemma import ( load_pali_gemma_backbone, ) @@ -64,6 +70,8 @@ def load_transformers_backbone(cls, preset, load_weights): return load_gpt2_backbone(cls, preset, load_weights) if cls.__name__ == "DistilBertBackbone": return load_distilbert_backbone(cls, preset, load_weights) + if cls.__name__ == "MistralBackbone": + return load_mistral_backbone(cls, preset, load_weights) raise ValueError( f"{cls} has not been ported from the Hugging Face format yet. " "Please check Hugging Face Hub for the Keras model. " @@ -95,6 +103,8 @@ def load_transformers_tokenizer(cls, preset): return load_gpt2_tokenizer(cls, preset) if cls.__name__ == "DistilBertTokenizer": return load_distilbert_tokenizer(cls, preset) + if cls.__name__ == "MisralTokenizer": + return load_mistral_tokenizer(cls, preset) raise ValueError( f"{cls} has not been ported from the Hugging Face format yet. " "Please check Hugging Face Hub for the Keras model. " diff --git a/keras_nlp/src/utils/transformers/convert_mistral.py b/keras_nlp/src/utils/transformers/convert_mistral.py new file mode 100644 index 0000000000..f462526f90 --- /dev/null +++ b/keras_nlp/src/utils/transformers/convert_mistral.py @@ -0,0 +1,144 @@ +# Copyright 2024 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import numpy as np + +from keras_nlp.src.utils.preset_utils import HF_CONFIG_FILE +from keras_nlp.src.utils.preset_utils import get_file +from keras_nlp.src.utils.preset_utils import jax_memory_cleanup +from keras_nlp.src.utils.preset_utils import load_config +from keras_nlp.src.utils.transformers.safetensor_utils import SafetensorLoader + + +def convert_backbone_config(transformers_config): + return { + "vocabulary_size": transformers_config["vocab_size"], + "num_layers": transformers_config["num_hidden_layers"], + "num_query_heads": transformers_config["num_attention_heads"], + "hidden_dim": transformers_config["hidden_size"], + "intermediate_dim": transformers_config["intermediate_size"] * 2, + "num_key_value_heads": transformers_config["num_key_value_heads"], + "rope_max_wavelength": transformers_config["rope_theta"], + "layer_norm_epsilon": transformers_config["rms_norm_eps"], + "sliding_window": transformers_config["sliding_window"], + "dtype": transformers_config["torch_dtype"], + } + + +def convert_weights(backbone, loader): + # Embeddings + loader.port_weight( + keras_variable=backbone.token_embedding.embeddings, + hf_weight_key="model.embed_tokens.weight", + hook_fn=lambda hf_tensor, keras_shape: hf_tensor.astype(np.float32), + ) + loader.port_weight( + keras_variable=backbone.token_embedding.reverse_embeddings, + hf_weight_key="lm_head.weight", + hook_fn=lambda hf_tensor, _: np.transpose( + hf_tensor.astype(np.float32), axes=(1, 0) + ), + ) + + # Attention blocks + for index in range(backbone.num_layers): + decoder_layer = backbone.transformer_layers[index] + + # Norm layers + loader.port_weight( + keras_variable=decoder_layer._self_attention_layernorm.scale, + hf_weight_key=f"model.layers.{index}.input_layernorm.weight", + hook_fn=lambda hf_tensor, keras_shape: hf_tensor.astype(np.float32), + ) + loader.port_weight( + keras_variable=decoder_layer._feedforward_layernorm.scale, + hf_weight_key=f"model.layers.{index}.post_attention_layernorm.weight", + hook_fn=lambda hf_tensor, keras_shape: hf_tensor.astype(np.float32), + ) + + # Attention layers + loader.port_weight( + keras_variable=decoder_layer._self_attention_layer._query_dense.kernel, + hf_weight_key=f"model.layers.{index}.self_attn.q_proj.weight", + hook_fn=lambda hf_tensor, keras_shape: np.reshape( + np.transpose(hf_tensor.astype(np.float32)), keras_shape + ), + ) + loader.port_weight( + keras_variable=decoder_layer._self_attention_layer._key_dense.kernel, + hf_weight_key=f"model.layers.{index}.self_attn.k_proj.weight", + hook_fn=lambda hf_tensor, keras_shape: np.reshape( + np.transpose(hf_tensor.astype(np.float32)), keras_shape + ), + ) + loader.port_weight( + keras_variable=decoder_layer._self_attention_layer._value_dense.kernel, + hf_weight_key=f"model.layers.{index}.self_attn.v_proj.weight", + hook_fn=lambda hf_tensor, keras_shape: np.reshape( + np.transpose(hf_tensor.astype(np.float32)), keras_shape + ), + ) + loader.port_weight( + keras_variable=decoder_layer._self_attention_layer._output_dense.kernel, + hf_weight_key=f"model.layers.{index}.self_attn.o_proj.weight", + hook_fn=lambda hf_tensor, keras_shape: np.reshape( + np.transpose(hf_tensor.astype(np.float32)), keras_shape + ), + ) + + # MLP layers + loader.port_weight( + keras_variable=decoder_layer._feedforward_gate_dense.kernel, + hf_weight_key=f"model.layers.{index}.mlp.gate_proj.weight", + hook_fn=lambda hf_tensor, _: np.transpose( + hf_tensor.astype(np.float32), axes=(1, 0) + ), + ) + loader.port_weight( + 
keras_variable=decoder_layer._feedforward_intermediate_dense.kernel, + hf_weight_key=f"model.layers.{index}.mlp.up_proj.weight", + hook_fn=lambda hf_tensor, _: np.transpose( + hf_tensor.astype(np.float32), axes=(1, 0) + ), + ) + loader.port_weight( + keras_variable=decoder_layer._feedforward_output_dense.kernel, + hf_weight_key=f"model.layers.{index}.mlp.down_proj.weight", + hook_fn=lambda hf_tensor, _: np.transpose( + hf_tensor.astype(np.float32), axes=(1, 0) + ), + ) + + # Normalization + loader.port_weight( + keras_variable=backbone.layer_norm.scale, + hf_weight_key="model.norm.weight", + hook_fn=lambda hf_tensor, keras_variable: hf_tensor.astype(np.float32), + ) + + return backbone + + +def load_mistral_backbone(cls, preset, load_weights): + transformers_config = load_config(preset, HF_CONFIG_FILE) + keras_config = convert_backbone_config(transformers_config) + backbone = cls(**keras_config) + if load_weights: + jax_memory_cleanup(backbone) + with SafetensorLoader(preset) as loader: + convert_weights(backbone, loader) + return backbone + + +def load_mistral_tokenizer(cls, preset): + return cls(get_file(preset, "tokenizer.model")) From f64800238546733d0161179a1135f5289e1117b8 Mon Sep 17 00:00:00 2001 From: cosmo3769 Date: Tue, 13 Aug 2024 04:32:16 +0530 Subject: [PATCH 2/7] update test --- .../transformers/convert_mistral_test.py | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 keras_nlp/src/utils/transformers/convert_mistral_test.py diff --git a/keras_nlp/src/utils/transformers/convert_mistral_test.py b/keras_nlp/src/utils/transformers/convert_mistral_test.py new file mode 100644 index 0000000000..878f3d9c8f --- /dev/null +++ b/keras_nlp/src/utils/transformers/convert_mistral_test.py @@ -0,0 +1,27 @@ +# Copyright 2024 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import pytest + +from keras_nlp.src.models.mistral.mistral_causal_lm import MistralCausalLM +from keras_nlp.src.tests.test_case import TestCase + + +class TestTask(TestCase): + @pytest.mark.large + def test_convert_tiny_preset(self): + model = MistralCausalLM.from_preset("hf://mistralai/Mistral-7B-v0.1") + prompt = "What is your favorite condiment?" 
+ model.generate([prompt], max_length=15) + + # TODO: compare numerics with huggingface model From 151c46663f5149fba1f0268997ebad500fc3113b Mon Sep 17 00:00:00 2001 From: cosmo3769 Date: Tue, 13 Aug 2024 04:52:44 +0530 Subject: [PATCH 3/7] fix config --- keras_nlp/src/utils/transformers/convert_mistral.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras_nlp/src/utils/transformers/convert_mistral.py b/keras_nlp/src/utils/transformers/convert_mistral.py index f462526f90..f455cfa56f 100644 --- a/keras_nlp/src/utils/transformers/convert_mistral.py +++ b/keras_nlp/src/utils/transformers/convert_mistral.py @@ -26,7 +26,7 @@ def convert_backbone_config(transformers_config): "num_layers": transformers_config["num_hidden_layers"], "num_query_heads": transformers_config["num_attention_heads"], "hidden_dim": transformers_config["hidden_size"], - "intermediate_dim": transformers_config["intermediate_size"] * 2, + "intermediate_dim": transformers_config["intermediate_size"], "num_key_value_heads": transformers_config["num_key_value_heads"], "rope_max_wavelength": transformers_config["rope_theta"], "layer_norm_epsilon": transformers_config["rms_norm_eps"], From 8ab13c4df9cef92f4725cf34d3ad8535ccd32a79 Mon Sep 17 00:00:00 2001 From: cosmo3769 Date: Tue, 13 Aug 2024 04:52:55 +0530 Subject: [PATCH 4/7] fix typo --- keras_nlp/src/utils/transformers/convert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras_nlp/src/utils/transformers/convert.py b/keras_nlp/src/utils/transformers/convert.py index 447c0fb895..db52dcc20b 100644 --- a/keras_nlp/src/utils/transformers/convert.py +++ b/keras_nlp/src/utils/transformers/convert.py @@ -103,7 +103,7 @@ def load_transformers_tokenizer(cls, preset): return load_gpt2_tokenizer(cls, preset) if cls.__name__ == "DistilBertTokenizer": return load_distilbert_tokenizer(cls, preset) - if cls.__name__ == "MisralTokenizer": + if cls.__name__ == "MistralTokenizer": return load_mistral_tokenizer(cls, preset) raise ValueError( f"{cls} has not been ported from the Hugging Face format yet. 
" From a55d28b26d246b13c0191bbdd3537573d36e2ef3 Mon Sep 17 00:00:00 2001 From: cosmo3769 Date: Tue, 13 Aug 2024 05:47:53 +0530 Subject: [PATCH 5/7] switched float32 to float16 --- .../src/utils/transformers/convert_mistral.py | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/keras_nlp/src/utils/transformers/convert_mistral.py b/keras_nlp/src/utils/transformers/convert_mistral.py index f455cfa56f..cf9ae4c4a4 100644 --- a/keras_nlp/src/utils/transformers/convert_mistral.py +++ b/keras_nlp/src/utils/transformers/convert_mistral.py @@ -40,13 +40,13 @@ def convert_weights(backbone, loader): loader.port_weight( keras_variable=backbone.token_embedding.embeddings, hf_weight_key="model.embed_tokens.weight", - hook_fn=lambda hf_tensor, keras_shape: hf_tensor.astype(np.float32), + hook_fn=lambda hf_tensor, keras_shape: hf_tensor.astype(np.float16), ) loader.port_weight( keras_variable=backbone.token_embedding.reverse_embeddings, hf_weight_key="lm_head.weight", hook_fn=lambda hf_tensor, _: np.transpose( - hf_tensor.astype(np.float32), axes=(1, 0) + hf_tensor.astype(np.float16), axes=(1, 0) ), ) @@ -58,12 +58,12 @@ def convert_weights(backbone, loader): loader.port_weight( keras_variable=decoder_layer._self_attention_layernorm.scale, hf_weight_key=f"model.layers.{index}.input_layernorm.weight", - hook_fn=lambda hf_tensor, keras_shape: hf_tensor.astype(np.float32), + hook_fn=lambda hf_tensor, keras_shape: hf_tensor.astype(np.float16), ) loader.port_weight( keras_variable=decoder_layer._feedforward_layernorm.scale, hf_weight_key=f"model.layers.{index}.post_attention_layernorm.weight", - hook_fn=lambda hf_tensor, keras_shape: hf_tensor.astype(np.float32), + hook_fn=lambda hf_tensor, keras_shape: hf_tensor.astype(np.float16), ) # Attention layers @@ -71,28 +71,28 @@ def convert_weights(backbone, loader): keras_variable=decoder_layer._self_attention_layer._query_dense.kernel, hf_weight_key=f"model.layers.{index}.self_attn.q_proj.weight", hook_fn=lambda hf_tensor, keras_shape: np.reshape( - np.transpose(hf_tensor.astype(np.float32)), keras_shape + np.transpose(hf_tensor.astype(np.float16)), keras_shape ), ) loader.port_weight( keras_variable=decoder_layer._self_attention_layer._key_dense.kernel, hf_weight_key=f"model.layers.{index}.self_attn.k_proj.weight", hook_fn=lambda hf_tensor, keras_shape: np.reshape( - np.transpose(hf_tensor.astype(np.float32)), keras_shape + np.transpose(hf_tensor.astype(np.float16)), keras_shape ), ) loader.port_weight( keras_variable=decoder_layer._self_attention_layer._value_dense.kernel, hf_weight_key=f"model.layers.{index}.self_attn.v_proj.weight", hook_fn=lambda hf_tensor, keras_shape: np.reshape( - np.transpose(hf_tensor.astype(np.float32)), keras_shape + np.transpose(hf_tensor.astype(np.float16)), keras_shape ), ) loader.port_weight( keras_variable=decoder_layer._self_attention_layer._output_dense.kernel, hf_weight_key=f"model.layers.{index}.self_attn.o_proj.weight", hook_fn=lambda hf_tensor, keras_shape: np.reshape( - np.transpose(hf_tensor.astype(np.float32)), keras_shape + np.transpose(hf_tensor.astype(np.float16)), keras_shape ), ) @@ -101,21 +101,21 @@ def convert_weights(backbone, loader): keras_variable=decoder_layer._feedforward_gate_dense.kernel, hf_weight_key=f"model.layers.{index}.mlp.gate_proj.weight", hook_fn=lambda hf_tensor, _: np.transpose( - hf_tensor.astype(np.float32), axes=(1, 0) + hf_tensor.astype(np.float16), axes=(1, 0) ), ) loader.port_weight( keras_variable=decoder_layer._feedforward_intermediate_dense.kernel, 
hf_weight_key=f"model.layers.{index}.mlp.up_proj.weight", hook_fn=lambda hf_tensor, _: np.transpose( - hf_tensor.astype(np.float32), axes=(1, 0) + hf_tensor.astype(np.float16), axes=(1, 0) ), ) loader.port_weight( keras_variable=decoder_layer._feedforward_output_dense.kernel, hf_weight_key=f"model.layers.{index}.mlp.down_proj.weight", hook_fn=lambda hf_tensor, _: np.transpose( - hf_tensor.astype(np.float32), axes=(1, 0) + hf_tensor.astype(np.float16), axes=(1, 0) ), ) @@ -123,7 +123,7 @@ def convert_weights(backbone, loader): loader.port_weight( keras_variable=backbone.layer_norm.scale, hf_weight_key="model.norm.weight", - hook_fn=lambda hf_tensor, keras_variable: hf_tensor.astype(np.float32), + hook_fn=lambda hf_tensor, keras_variable: hf_tensor.astype(np.float16), ) return backbone From 3d77342a193daa244322ad15ac9c826859f6ef06 Mon Sep 17 00:00:00 2001 From: cosmo3769 Date: Wed, 21 Aug 2024 21:44:42 +0530 Subject: [PATCH 6/7] tiny-mistral-test --- keras_nlp/src/utils/transformers/convert_mistral.py | 8 ++++---- keras_nlp/src/utils/transformers/convert_mistral_test.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/keras_nlp/src/utils/transformers/convert_mistral.py b/keras_nlp/src/utils/transformers/convert_mistral.py index cf9ae4c4a4..aa80bad219 100644 --- a/keras_nlp/src/utils/transformers/convert_mistral.py +++ b/keras_nlp/src/utils/transformers/convert_mistral.py @@ -40,7 +40,7 @@ def convert_weights(backbone, loader): loader.port_weight( keras_variable=backbone.token_embedding.embeddings, hf_weight_key="model.embed_tokens.weight", - hook_fn=lambda hf_tensor, keras_shape: hf_tensor.astype(np.float16), + hook_fn=lambda hf_tensor, _: hf_tensor.astype(np.float16), ) loader.port_weight( keras_variable=backbone.token_embedding.reverse_embeddings, @@ -58,12 +58,12 @@ def convert_weights(backbone, loader): loader.port_weight( keras_variable=decoder_layer._self_attention_layernorm.scale, hf_weight_key=f"model.layers.{index}.input_layernorm.weight", - hook_fn=lambda hf_tensor, keras_shape: hf_tensor.astype(np.float16), + hook_fn=lambda hf_tensor, _: hf_tensor.astype(np.float16), ) loader.port_weight( keras_variable=decoder_layer._feedforward_layernorm.scale, hf_weight_key=f"model.layers.{index}.post_attention_layernorm.weight", - hook_fn=lambda hf_tensor, keras_shape: hf_tensor.astype(np.float16), + hook_fn=lambda hf_tensor, _: hf_tensor.astype(np.float16), ) # Attention layers @@ -123,7 +123,7 @@ def convert_weights(backbone, loader): loader.port_weight( keras_variable=backbone.layer_norm.scale, hf_weight_key="model.norm.weight", - hook_fn=lambda hf_tensor, keras_variable: hf_tensor.astype(np.float16), + hook_fn=lambda hf_tensor, _: hf_tensor.astype(np.float16), ) return backbone diff --git a/keras_nlp/src/utils/transformers/convert_mistral_test.py b/keras_nlp/src/utils/transformers/convert_mistral_test.py index 878f3d9c8f..82ac9eccc4 100644 --- a/keras_nlp/src/utils/transformers/convert_mistral_test.py +++ b/keras_nlp/src/utils/transformers/convert_mistral_test.py @@ -20,7 +20,7 @@ class TestTask(TestCase): @pytest.mark.large def test_convert_tiny_preset(self): - model = MistralCausalLM.from_preset("hf://mistralai/Mistral-7B-v0.1") + model = MistralCausalLM.from_preset("hf://cosmo3769/tiny-mistral-test") prompt = "What is your favorite condiment?" 
model.generate([prompt], max_length=15) From f82b4fdd7a77a8f8be6ae132142441d2f5d84905 Mon Sep 17 00:00:00 2001 From: cosmo3769 Date: Thu, 22 Aug 2024 02:48:57 +0530 Subject: [PATCH 7/7] removed dtype config --- keras_nlp/src/utils/transformers/convert_mistral.py | 1 - 1 file changed, 1 deletion(-) diff --git a/keras_nlp/src/utils/transformers/convert_mistral.py b/keras_nlp/src/utils/transformers/convert_mistral.py index aa80bad219..5a8b989a4a 100644 --- a/keras_nlp/src/utils/transformers/convert_mistral.py +++ b/keras_nlp/src/utils/transformers/convert_mistral.py @@ -31,7 +31,6 @@ def convert_backbone_config(transformers_config): "rope_max_wavelength": transformers_config["rope_theta"], "layer_norm_epsilon": transformers_config["rms_norm_eps"], "sliding_window": transformers_config["sliding_window"], - "dtype": transformers_config["torch_dtype"], }
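
For reference, a minimal sketch of how the conversion path added in this series is exercised end to end, assuming the public keras_nlp.models exports; the tiny preset name comes from convert_mistral_test.py above, and mistralai/Mistral-7B-v0.1 is only an illustrative full-size checkpoint (a multi-GB safetensors download), not something the tests pull:

    import keras_nlp

    # The "hf://" prefix routes from_preset through convert.py, which dispatches
    # to load_mistral_backbone / load_mistral_tokenizer defined in this series.
    causal_lm = keras_nlp.models.MistralCausalLM.from_preset(
        "hf://cosmo3769/tiny-mistral-test"
    )
    print(causal_lm.generate(["What is your favorite condiment?"], max_length=15))

    # The tokenizer alone follows the same path; load_mistral_tokenizer reads
    # tokenizer.model from the Hugging Face repo.
    tokenizer = keras_nlp.models.MistralTokenizer.from_preset(
        "hf://cosmo3769/tiny-mistral-test"
    )

    # Assumed example only: the full 7B checkpoint converts the same way.
    # backbone = keras_nlp.models.MistralBackbone.from_preset(
    #     "hf://mistralai/Mistral-7B-v0.1"
    # )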