From 63376790327d41e2239049bcac880ca5c3ab2b40 Mon Sep 17 00:00:00 2001
From: Vinkal Chudgar
Date: Thu, 25 Sep 2025 18:35:26 +0000
Subject: [PATCH 1/2] minicpm: make GGUF scaling keys optional with legacy
 defaults

Older MiniCPM GGUFs do not include the scaling metadata keys
(minicpm.embedding_scale, minicpm.residual_scale, minicpm.logit_scale).
The loader currently treats these as required, so quantization fails with:

key not found in model: minicpm.embedding_scale

This change restores backward compatibility by treating these keys as
optional in the loader and using the older MiniCPM scaling values:

embedding_scale = 12.0f
residual_scale  = 1.4f / sqrt(n_layer)
logit_scale     = 256.0f / n_embd

When the GGUF provides the keys, their values override the defaults;
otherwise the legacy defaults are used. Newer GGUFs that already include
these keys are unaffected.

Fixes: #16192

Signed-off-by: Vinkal Chudgar
---
 src/llama-model.cpp | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 48d9859c7d0be..4dc13edc7979b 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -675,10 +675,21 @@ void llama_model::load_hparams(llama_model_loader & ml) {
             } break;
         case LLM_ARCH_MINICPM:
             {
+                // Backward-compatible defaults for older MiniCPM GGUFs:
+                // historical MiniCPM graph constants:
+                // n_embd_base = 256, scale_embd = 12.0f, scale_depth = 1.4f
+
+                hparams.f_embedding_scale = 12.0f;
+                const float scale_depth = 1.4f;
+                hparams.f_residual_scale = scale_depth / sqrtf((float) hparams.n_layer);
+                const float n_embd_base = 256.0f;
+                hparams.f_logit_scale = hparams.n_embd ? (n_embd_base / float(hparams.n_embd)) : 1.0f;
+
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);
-                ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
-                ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
+
+                // Optional KV reads, override defaults if present in newer GGUF exports
+                ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale, /*required=*/false);
+                ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale, /*required=*/false);
+                ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale, /*required=*/false);
 
                 // MiniCPM uses rope by default, unlike Granite which uses it as a switch
                 hparams.rope_finetuned = true;

From 28a4100364d25cfc113eb9c19df5727f7039015e Mon Sep 17 00:00:00 2001
From: Vinkal
Date: Sat, 27 Sep 2025 00:11:14 +0530
Subject: [PATCH 2/2] Update src/llama-model.cpp
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Committed as suggested. Thanks!

Co-authored-by: Sigbjørn Skjæret
---
 src/llama-model.cpp | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 4dc13edc7979b..6fafe91f7d7f0 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -675,15 +675,11 @@ void llama_model::load_hparams(llama_model_loader & ml) {
             } break;
         case LLM_ARCH_MINICPM:
             {
-                // Backward-compatible defaults for older MiniCPM GGUFs:
-                // historical MiniCPM graph constants:
-                // n_embd_base = 256, scale_embd = 12.0f, scale_depth = 1.4f
-
+                // Backward-compatible defaults for older MiniCPM GGUFs
                 hparams.f_embedding_scale = 12.0f;
-                const float scale_depth = 1.4f;
-                hparams.f_residual_scale = scale_depth / sqrtf((float) hparams.n_layer);
-                const float n_embd_base = 256.0f;
-                hparams.f_logit_scale = hparams.n_embd ? (n_embd_base / float(hparams.n_embd)) : 1.0f;
+                hparams.f_residual_scale = 1.4f / sqrtf(float(hparams.n_layer));
+                hparams.f_logit_scale = hparams.n_embd ? (256.0f / float(hparams.n_embd)) : 1.0f;
+
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
 
                 // Optional KV reads, override defaults if present in newer GGUF exports