Move GLM4 f32 attention fix to the correct function (#13750)

Author: 0cc4m
Date:   2025-05-24 16:49:12 +02:00 (committed by GitHub)
Parent: 4c32832c59
Commit: 259469c4b5

@@ -1287,6 +1287,10 @@ ggml_tensor * llm_graph_context::build_attn(
 
     if (wo) {
         cur = build_lora_mm(wo, cur);
+        if (arch == LLM_ARCH_GLM4) {
+            // GLM4 seems to have numerical issues with half-precision accumulators
+            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
+        }
     }
 
     if (wo_b) {
@@ -1367,10 +1371,6 @@ ggml_tensor * llm_graph_context::build_attn(
 
     if (wo) {
         cur = build_lora_mm(wo, cur);
-        if (arch == LLM_ARCH_GLM4) {
-            // GLM4 seems to have numerical issues with half-precision accumulators
-            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
-        }
     }
 
     if (wo_b) {
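
Below is a minimal, hypothetical sketch (not part of this commit) of the mechanism the fix relies on: building a single ggml mul_mat node and marking its result for F32 accumulation with ggml_mul_mat_set_prec. The tensor shapes, context size, and the standalone main() are made up for illustration; in the commit the call is applied to the attention output projection inside build_attn.

// Sketch only: forcing F32 accumulation on one mul_mat node.
// Shapes and context size are arbitrary illustration values.
#include "ggml.h"

int main() {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16 * 1024 * 1024, // small scratch buffer for this toy graph
        /*.mem_buffer =*/ nullptr,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // A half-precision weight and an F32 activation; ggml_mul_mat requires
    // the two tensors to agree on ne[0].
    struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, 64, 128);
    struct ggml_tensor * x = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 32);

    struct ggml_tensor * cur = ggml_mul_mat(ctx, w, x);

    // Some backends accumulate this product in half precision by default;
    // this marks the node so they use F32 accumulators instead, which is
    // what the commit requests for GLM4's attention output projection.
    ggml_mul_mat_set_prec(cur, GGML_PREC_F32);

    ggml_free(ctx);
    return 0;
}

The precision override is per node, so only the matmuls that are known to be numerically sensitive pay the extra cost; other nodes keep the backend's default (GGML_PREC_DEFAULT).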