diff --git a/examples/models/llama/export_llama_lib.py b/examples/models/llama/export_llama_lib.py
index 07de9f3fa75..d0e45553957 100644
--- a/examples/models/llama/export_llama_lib.py
+++ b/examples/models/llama/export_llama_lib.py
@@ -93,7 +93,8 @@
     "llama3_1",
     "llama3_2",
     "static_llama",
-    "qwen2_5",
+    "qwen2_5_0_5b",
+    "qwen2_5_1_5b",
     "qwen3_0_6b",
     "qwen3_1_7b",
     "qwen3_4b",
@@ -102,7 +103,8 @@
 ]
 TORCHTUNE_DEFINED_MODELS = ["llama3_2_vision"]
 HUGGING_FACE_REPO_IDS = {
-    "qwen2_5": "Qwen/Qwen2.5-1.5B",
+    "qwen2_5_0_5b": "Qwen/Qwen2.5-0.5B",
+    "qwen2_5_1_5b": "Qwen/Qwen2.5-1.5B",
     "phi_4_mini": "microsoft/Phi-4-mini-instruct",
     "smollm2": "HuggingFaceTB/SmolLM-135M",
     "qwen3_0_6b": "Qwen/Qwen3-0.6B",
@@ -595,7 +597,7 @@ def export_llama(
     model_name = llm_config.base.model_class.value
     if not llm_config.base.checkpoint and model_name in HUGGING_FACE_REPO_IDS:
         repo_id = HUGGING_FACE_REPO_IDS[model_name]
-        if model_name == "qwen2_5":
+        if model_name.startswith("qwen2_5"):
             from executorch.examples.models.qwen2_5 import convert_weights
         elif model_name.startswith("qwen3"):
             from executorch.examples.models.qwen3 import convert_weights
diff --git a/examples/models/qwen2_5/README.md b/examples/models/qwen2_5/README.md
index c58807b46cb..11c92f4bd73 100644
--- a/examples/models/qwen2_5/README.md
+++ b/examples/models/qwen2_5/README.md
@@ -6,9 +6,10 @@ Qwen 2.5 is the latest iteration of the Qwen series of large language models (LL
 Qwen 2.5 uses the same example code as Llama, while the checkpoint, model params, and tokenizer are different. Please see the [Llama README page](../llama/README.md) for details.
 
 All commands for exporting and running Llama on various backends should also be applicable to Qwen 2.5, by swapping the following args:
+
 ```
-base.model_class="qwen2_5"
-base.params="examples/models/qwen2_5/config/1_5b_config.json"
+base.model_class=[qwen2_5_0_5b, qwen2_5_1_5b]
+base.params=[examples/models/qwen2_5/config/0_5b_config.json, examples/models/qwen2_5/config/1_5b_config.json]
 base.checkpoint=
 ```
 
@@ -34,7 +35,7 @@
 QWEN_CHECKPOINT=path/to/checkpoint.pth
 python -m extension.llm.export.export_llm \
   --config examples/models/qwen2_5/config/qwen2_5_xnnpack_q8da4w.yaml \
-  +base.model_class="qwen2_5" \
+  +base.model_class="qwen2_5_1_5b" \
   +base.checkpoint="${QWEN_CHECKPOINT:?}" \
   +base.params="examples/models/qwen2_5/config/1_5b_config.json" \
   +export.output_name="qwen2_5-1_5b.pte" \
diff --git a/extension/llm/export/config/llm_config.py b/extension/llm/export/config/llm_config.py
index 8f8646e88cc..839ef7bc730 100644
--- a/extension/llm/export/config/llm_config.py
+++ b/extension/llm/export/config/llm_config.py
@@ -37,7 +37,8 @@ class ModelType(str, Enum):
     llama3_2 = "llama3_2"
     llama3_2_vision = "llama3_2_vision"
     static_llama = "static_llama"
-    qwen2_5 = "qwen2_5"
+    qwen2_5_0_5b = "qwen2_5_0_5b"
+    qwen2_5_1_5b = "qwen2_5_1_5b"
     qwen3_0_6b = "qwen3_0_6b"
     qwen3_1_7b = "qwen3_1_7b"
     qwen3_4b = "qwen3_4b"
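For reference, here is a minimal sketch of the lookup-then-dispatch pattern the `export_llama` hunk relies on: each per-size `model_class` key resolves to its own Hugging Face repo ID, while the weight converter is still selected by family prefix, so both Qwen 2.5 sizes share one converter. This is not the actual ExecuTorch source; `pick_converter` is a hypothetical stand-in for the inline imports in `export_llama`.

```
# Sketch of the dispatch pattern from the export_llama hunk (assumed, not the
# real sources). The repo-ID table mirrors the patched dict; pick_converter is
# a hypothetical stand-in for the inline `from ... import convert_weights`.
HUGGING_FACE_REPO_IDS = {
    "qwen2_5_0_5b": "Qwen/Qwen2.5-0.5B",
    "qwen2_5_1_5b": "Qwen/Qwen2.5-1.5B",
    "qwen3_0_6b": "Qwen/Qwen3-0.6B",
}

def pick_converter(model_name: str) -> str:
    # Prefix matching keeps one converter per model family, so adding a new
    # size (e.g. qwen2_5_0_5b) needs no new branch here.
    if model_name.startswith("qwen2_5"):
        return "executorch.examples.models.qwen2_5.convert_weights"
    elif model_name.startswith("qwen3"):
        return "executorch.examples.models.qwen3.convert_weights"
    raise ValueError(f"No weight converter registered for {model_name!r}")

# Both Qwen 2.5 sizes resolve to the same converter module.
assert pick_converter("qwen2_5_0_5b") == pick_converter("qwen2_5_1_5b")
```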
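A note on the README hunk: the bracketed values list the per-size alternatives positionally, so `base.model_class` and `base.params` must be picked from matching positions. A small illustrative pairing (paths taken from the diff; the dict itself is not part of the codebase):

```
# Illustrative pairing of the new model_class values with their params files,
# as implied by the README's bracket lists (pick matching entries).
QWEN2_5_PARAMS = {
    "qwen2_5_0_5b": "examples/models/qwen2_5/config/0_5b_config.json",
    "qwen2_5_1_5b": "examples/models/qwen2_5/config/1_5b_config.json",
}
```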