From e1bfe51cfc43a0cc99edd5d1b7c21fb270459ee7 Mon Sep 17 00:00:00 2001
From: "John W. Leimgruber III"
Date: Thu, 20 Nov 2025 20:53:48 -0500
Subject: [PATCH 1/2] Detect GigaChat3-10B-A1.8B as deepseek lite

Hardcodes checking the number of layers to detect the lite version of deepseek.
---
 src/llama-model.cpp      | 4 ++--
 src/models/deepseek2.cpp | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index e703181a19804..30902a59d5ca8 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -1593,7 +1593,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
             } break;
         case LLM_ARCH_DEEPSEEK2:
             {
-                bool is_lite = (hparams.n_layer == 27);
+                bool is_lite = (hparams.n_layer == 27 || hparams.n_layer == 26);
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                 ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
                 if (!is_lite) {
@@ -4581,7 +4581,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
             } break;
         case LLM_ARCH_DEEPSEEK2:
            {
-                const bool is_lite = (hparams.n_layer == 27);
+                const bool is_lite = (hparams.n_layer == 27 || hparams.n_layer == 26);
 
                 const bool is_mla = (hparams.n_embd_head_k_mla != 0 && hparams.n_embd_head_v_mla != 0);
 
diff --git a/src/models/deepseek2.cpp b/src/models/deepseek2.cpp
index 68f72f72bb643..507926af539c3 100644
--- a/src/models/deepseek2.cpp
+++ b/src/models/deepseek2.cpp
@@ -4,7 +4,7 @@ llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_graph_params & params) :
     llm_graph_context(params) {
-    bool is_lite = (hparams.n_layer == 27);
+    bool is_lite = (hparams.n_layer == 27 || hparams.n_layer == 26);
 
     const bool is_mla = (hparams.n_embd_head_k_mla != 0 && hparams.n_embd_head_v_mla != 0);
 

From 8efaefde1d1d3662c048e4a403511a26ee82c8cb Mon Sep 17 00:00:00 2001
From: "John W. Leimgruber III"
Date: Fri, 21 Nov 2025 08:36:39 -0500
Subject: [PATCH 2/2] Add comment identifying deepseek lite variants

Deepseek lite variants include DeepSeek-V2-Lite and GigaChat3-10B-A1.8B.
---
 src/llama-model.cpp      | 2 ++
 src/models/deepseek2.cpp | 1 +
 2 files changed, 3 insertions(+)

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 30902a59d5ca8..175549a9e30f1 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -1593,6 +1593,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
             } break;
         case LLM_ARCH_DEEPSEEK2:
             {
+                // lite variants include DeepSeek-V2-Lite, GigaChat3-10B-A1.8B
                 bool is_lite = (hparams.n_layer == 27 || hparams.n_layer == 26);
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                 ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
@@ -4581,6 +4582,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
             } break;
         case LLM_ARCH_DEEPSEEK2:
             {
+                // lite variants include DeepSeek-V2-Lite, GigaChat3-10B-A1.8B
                 const bool is_lite = (hparams.n_layer == 27 || hparams.n_layer == 26);
 
                 const bool is_mla = (hparams.n_embd_head_k_mla != 0 && hparams.n_embd_head_v_mla != 0);
diff --git a/src/models/deepseek2.cpp b/src/models/deepseek2.cpp
index 507926af539c3..0b41f7ba8eb37 100644
--- a/src/models/deepseek2.cpp
+++ b/src/models/deepseek2.cpp
@@ -4,6 +4,7 @@ llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_graph_params & params) :
     llm_graph_context(params) {
+    // lite variants include DeepSeek-V2-Lite, GigaChat3-10B-A1.8B
     bool is_lite = (hparams.n_layer == 27 || hparams.n_layer == 26);
 
     const bool is_mla = (hparams.n_embd_head_k_mla != 0 && hparams.n_embd_head_v_mla != 0);