llama : deprecate llama_kv_self_ API (#14030)
* llama : deprecate llama_kv_self_ API

ggml-ci

* llama : allow llama_memory_(nullptr)

ggml-ci

* memory : add flag for optional data clear in llama_memory_clear

ggml-ci
This commit is contained in:
parent 487a5e0401
commit 745aa5319b
34 changed files with 206 additions and 127 deletions
@@ -217,7 +217,7 @@ int main(int argc, char ** argv) {
     {
         LOG_DBG("clear kv cache from any extra tokens, n_past = %d\n", n_past);

-        llama_kv_self_seq_rm(ctx_tgt, 0, n_past, -1);
+        llama_memory_seq_rm(llama_get_memory(ctx_tgt), 0, n_past, -1);
     }

     if ((params.n_predict >= 0 && n_predict > params.n_predict) || has_eos) {
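For reference, a minimal sketch of the migration pattern this commit applies across the changed files, based on the hunk above and the commit message. The wrapper function and its call site are hypothetical; llama_get_memory, llama_memory_seq_rm, and llama_memory_clear are taken from the commit itself, and my reading of the new bool flag on llama_memory_clear (clear data buffers in addition to metadata) is an assumption from the message "add flag for optional data clear".

// Sketch: porting a caller from the deprecated llama_kv_self_* calls
// to the llama_memory_* API. The function name is hypothetical.
#include "llama.h"

static void trim_and_reset(struct llama_context * ctx, llama_pos n_past) {
    // Before: llama_kv_self_seq_rm(ctx, 0, n_past, -1);
    // After:  fetch the memory handle once, then operate on it.
    // Per the commit message, llama_memory_* now also tolerates a
    // nullptr memory handle, so this is safe for contexts without one.
    llama_memory_t mem = llama_get_memory(ctx);

    // Drop cached tokens of sequence 0 from position n_past to the end.
    llama_memory_seq_rm(mem, 0, n_past, -1);

    // Clear the cache metadata; the second argument is the new flag from
    // this commit — true is assumed to also clear the data buffers.
    llama_memory_clear(mem, true);
}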