llama : update llama_decode_internal ref [no ci] (#11840)

This commit updates the comment in llama_kv_cache.h to reflect the
change of the function name from llama_decode_internal to
llama_decode_impl.
Daniel Bevenius 2025-02-13 07:07:51 +01:00 committed by GitHub
parent a394039db0
commit 3e69319772


@@ -37,7 +37,7 @@ struct llama_kv_cache {
     bool can_shift = false;
 
     // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_internal also uses it, so it
+    // for a free KV slot. llama_decode_impl also uses it, so it
     // cannot be freely changed after a slot has been allocated.
     uint32_t head = 0;
     uint32_t size = 0;
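
To illustrate the invariant described in the updated comment, here is a minimal sketch (not the actual llama.cpp implementation) of a cache where head serves as the starting point when searching for a free slot and is then consumed by the decode step. The struct kv_cache_sketch, kv_cell, and find_slot names are hypothetical and simplified for this example.

    #include <cstdint>
    #include <vector>

    struct kv_cell {
        int32_t pos = -1;               // -1 means the cell is free
    };

    struct kv_cache_sketch {
        uint32_t head = 0;              // search hint; also read back after allocation
        uint32_t size = 0;
        std::vector<kv_cell> cells;

        explicit kv_cache_sketch(uint32_t n) : size(n), cells(n) {}

        // Find n_tokens contiguous free cells, scanning from head.
        // On success, head points at the allocated region; the decode step
        // later relies on that value to know where the new tokens were placed,
        // which is why head must not be changed freely after allocation.
        bool find_slot(uint32_t n_tokens) {
            for (uint32_t start = 0; start < size; ++start) {
                uint32_t cand = (head + start) % size;
                if (cand + n_tokens > size) {
                    continue;           // keep the slot contiguous
                }
                bool is_free = true;
                for (uint32_t i = 0; i < n_tokens; ++i) {
                    if (cells[cand + i].pos >= 0) { is_free = false; break; }
                }
                if (is_free) {
                    head = cand;        // decode reads this afterwards
                    return true;
                }
            }
            return false;
        }
    };

In this sketch, mutating head between find_slot and the decode step would make the decode step write or read the wrong cells, which mirrors the constraint the comment documents for llama_decode_impl.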