Move GLM4 f32 attention fix to the correct function (#13750)
parent 4c32832c59
commit 259469c4b5
1 changed file with 4 additions and 4 deletions
@@ -1287,6 +1287,10 @@ ggml_tensor * llm_graph_context::build_attn(
 
     if (wo) {
         cur = build_lora_mm(wo, cur);
+        if (arch == LLM_ARCH_GLM4) {
+            // GLM4 seems to have numerical issues with half-precision accumulators
+            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
+        }
     }
 
     if (wo_b) {
@@ -1367,10 +1371,6 @@ ggml_tensor * llm_graph_context::build_attn(
 
     if (wo) {
         cur = build_lora_mm(wo, cur);
-        if (arch == LLM_ARCH_GLM4) {
-            // GLM4 seems to have numerical issues with half-precision accumulators
-            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
-        }
     }
 
     if (wo_b) {
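For reference, the relocated fix relies on ggml's per-node precision override: both hunks touch what appear to be two overloads of llm_graph_context::build_attn, and the commit moves the override from one overload to the other. Below is a minimal sketch of the same pattern as a standalone call sequence; the helper name project_output and its parameters are hypothetical, while ggml_mul_mat, ggml_mul_mat_set_prec, and GGML_PREC_F32 are the actual ggml API seen in the diff above.

#include "ggml.h"

// Hypothetical helper mirroring the hunks above: run the output
// projection, then opt this mul_mat node into f32 accumulation for
// architectures (such as GLM4) that lose precision with f16 accumulators.
static ggml_tensor * project_output(ggml_context * ctx,
                                    ggml_tensor  * wo,   // output projection weight
                                    ggml_tensor  * cur,  // attention result
                                    bool           f32_accum) {
    cur = ggml_mul_mat(ctx, wo, cur);
    if (f32_accum) {
        // mark this node so backends accumulate in f32 instead of
        // the default (potentially f16) precision
        ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
    }
    return cur;
}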