llama : fix K-shift with quantized K and BLAS backend (#13113)
parent 558a764713
commit 295354ea68

2 changed files with 4 additions and 16 deletions
@@ -170,8 +170,7 @@ private:
             ggml_tensor * shift,
             ggml_tensor * factors,
                   float   freq_base,
-                  float   freq_scale,
-            ggml_backend_buffer * bbuf) const;
+                  float   freq_scale) const;
 
     llm_graph_result_ptr build_kv_self_shift(
             ggml_context * ctx0,
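The hunk above removes the ggml_backend_buffer * bbuf parameter from the build_rope_shift declaration. For orientation, below is a minimal sketch of the kind of K-shift graph such a helper builds over a possibly quantized K cache; the helper name, rope mode, and the zeroed extension parameters are illustrative assumptions, not the repository's implementation.

// Hedged sketch, not the repository's code: the general K-shift pattern
// the build_rope_shift declaration refers to. Names and rope parameters
// here are illustrative assumptions.
#include "ggml.h"

static ggml_tensor * rope_shift_sketch(
        ggml_context * ctx,
        ggml_tensor  * k_view,  // view into the (possibly quantized) K cache
        ggml_tensor  * shift,   // per-position shift values (GGML_TYPE_I32)
        float          freq_base,
        float          freq_scale) {
    const int n_rot = (int) k_view->ne[0];

    if (ggml_is_quantized(k_view->type)) {
        // a quantized cache cannot be roped in place: dequantize to F32,
        // apply the rope shift, then quantize back into the cache view
        ggml_tensor * tmp = ggml_cast(ctx, k_view, GGML_TYPE_F32);
        tmp = ggml_rope_ext(ctx, tmp, shift, nullptr,
                n_rot, GGML_ROPE_TYPE_NEOX, 0,
                freq_base, freq_scale, 0.0f, 1.0f, 0.0f, 0.0f);
        return ggml_cpy(ctx, tmp, k_view);
    }

    // F16/F32 caches can be shifted in place
    return ggml_rope_ext_inplace(ctx, k_view, shift, nullptr,
            n_rot, GGML_ROPE_TYPE_NEOX, 0,
            freq_base, freq_scale, 0.0f, 1.0f, 0.0f, 0.0f);
}

When the quantized path is expressed purely through graph ops like this, backend placement can be left to the graph scheduler, which hints at why an explicit buffer argument is no longer needed in the declaration above.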