From 9e31bec4fd53634c9e5b04650488a09a055f5dab Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Thu, 5 Jun 2025 09:06:29 +0300
Subject: [PATCH] context : fix pos_min initialization upon error decode
 (#14008)

ggml-ci
---
 src/llama-context.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/llama-context.cpp b/src/llama-context.cpp
index 7c1a642c..f1b43b9c 100644
--- a/src/llama-context.cpp
+++ b/src/llama-context.cpp
@@ -1058,7 +1058,10 @@ int llama_context::decode(llama_batch & inp_batch) {
 
     if (!res) {
         // the last ubatch failed or was aborted -> remove all positions of that ubatch from the KV cache
-        llama_pos pos_min[LLAMA_MAX_PARALLEL_SEQUENCES] = { std::numeric_limits<llama_pos>::max() };
+        llama_pos pos_min[LLAMA_MAX_PARALLEL_SEQUENCES];
+        for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) {
+            pos_min[s] = std::numeric_limits<llama_pos>::max();
+        }
 
         for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
             const auto & seq_id = ubatch.seq_id[i][0];
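
For reference, the pitfall this patch fixes: in C++, the aggregate initializer
= { std::numeric_limits<llama_pos>::max() } sets only the first array element
to the maximum; the remaining elements are value-initialized to 0, so the
subsequent per-sequence min-reduction over the ubatch positions would start
from 0 instead of the maximum. Below is a minimal standalone sketch of the
buggy vs. fixed initialization (not part of the patch), assuming llama_pos is
a 32-bit signed integer as in llama.h and using N = 4 as a stand-in for
LLAMA_MAX_PARALLEL_SEQUENCES:

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    using llama_pos = std::int32_t; // assumption: matches the typedef in llama.h

    int main() {
        constexpr int N = 4; // stand-in for LLAMA_MAX_PARALLEL_SEQUENCES

        // buggy: only buggy[0] becomes INT32_MAX; buggy[1..N-1] are zero-initialized
        llama_pos buggy[N] = { std::numeric_limits<llama_pos>::max() };

        // fixed: every element is explicitly set to the maximum, as in the patch
        llama_pos fixed[N];
        for (int s = 0; s < N; ++s) {
            fixed[s] = std::numeric_limits<llama_pos>::max();
        }

        // prints buggy=0 for s >= 1, while fixed=2147483647 for all s
        for (int s = 0; s < N; ++s) {
            printf("s=%d buggy=%d fixed=%d\n", s, buggy[s], fixed[s]);
        }
        return 0;
    }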