llama : fix OLMo-2-0325-32B-Instruct K-norm size (#12400)
parent dc079cfdff
commit 8ba95dca20
1 changed file with 4 additions and 1 deletion
@@ -1005,6 +1005,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                     case 16: type = LLM_TYPE_1B; break;
                     case 32: type = LLM_TYPE_7B; break;
                     case 40: type = LLM_TYPE_13B; break;
+                    case 64: type = LLM_TYPE_32B; break;
                     default: type = LLM_TYPE_UNKNOWN;
                 }
             } break;
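A note on the new case: the size label in load_hparams for OLMo-2 is picked from the transformer layer count, and OLMo-2-0325-32B reportedly has 64 layers, so it now resolves to LLM_TYPE_32B instead of LLM_TYPE_UNKNOWN. A minimal standalone sketch of that mapping (simplified names, not the upstream code):

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in for llama.cpp's llm_type labels.
    enum llm_type { LLM_TYPE_1B, LLM_TYPE_7B, LLM_TYPE_13B, LLM_TYPE_32B, LLM_TYPE_UNKNOWN };

    // Mirrors the switch above: the OLMo-2 size label is keyed off the layer count.
    static llm_type olmo2_type_from_layers(uint32_t n_layer) {
        switch (n_layer) {
            case 16: return LLM_TYPE_1B;
            case 32: return LLM_TYPE_7B;
            case 40: return LLM_TYPE_13B;
            case 64: return LLM_TYPE_32B;   // case added by this commit
            default: return LLM_TYPE_UNKNOWN;
        }
    }

    int main() {
        // A 64-layer checkpoint (assumed for OLMo-2-0325-32B) maps to LLM_TYPE_32B.
        std::printf("type id for 64 layers: %d\n", (int) olmo2_type_from_layers(64));
        return 0;
    }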
@@ -2726,6 +2727,8 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                 } break;
             case LLM_ARCH_OLMO2:
                 {
+                    const int64_t n_embd_head = n_embd / n_head;
+
                     tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

                     // output
@@ -2740,7 +2743,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
                     layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
                     layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0);
-                    layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, 0);
+                    layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_head_kv * n_embd_head}, 0);
                     layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);

                     layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
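Why the K-norm weight size changes: with grouped-query attention the K projection only has n_head_kv heads, so the per-channel RMS-norm weight applied to K has n_head_kv * n_embd_head elements, which is smaller than n_embd whenever n_head_kv < n_head (Q keeps {n_embd} because n_head * n_embd_head == n_embd). A minimal sketch of the arithmetic with assumed OLMo-2-0325-32B dimensions (n_embd = 5120, n_head = 40, n_head_kv = 8; illustrative values, not taken from this diff):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Assumed OLMo-2-0325-32B shapes (illustrative, not from the diff).
        const int64_t n_embd      = 5120;            // hidden size
        const int64_t n_head      = 40;              // query heads
        const int64_t n_head_kv   = 8;               // key/value heads (GQA)
        const int64_t n_embd_head = n_embd / n_head; // 128 channels per head

        // Q is projected to n_head * n_embd_head = n_embd channels, so its norm
        // weight stays {n_embd}. K only has n_head_kv heads, so its norm weight
        // must be {n_head_kv * n_embd_head} = 1024, not {n_embd} = 5120.
        const int64_t q_norm_size = n_head    * n_embd_head;
        const int64_t k_norm_size = n_head_kv * n_embd_head;

        std::printf("q_norm size: %lld, k_norm size: %lld\n",
                    (long long) q_norm_size, (long long) k_norm_size);
        return 0;
    }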