From ffad04397399ea1650fda6560c7c753059804876 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 13 Jun 2025 11:18:25 +0300
Subject: [PATCH] server : fix SWA condition for full context reprocess
 (#14163)

ggml-ci
---
 tools/server/server.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/server/server.cpp b/tools/server/server.cpp
index 8efb8b70..b439d8b1 100644
--- a/tools/server/server.cpp
+++ b/tools/server/server.cpp
@@ -3217,7 +3217,7 @@ struct server_context {
                 }
 
                 const auto n_swa = llama_model_n_swa(model);
-                if (pos_min > slot.n_past - n_swa) {
+                if (pos_min > std::max(0, slot.n_past - n_swa)) {
                     SLT_WRN(slot, "n_past = %d, cache_tokens.size() = %d, seq_id = %d, pos_min = %d, n_swa = %d\n", slot.n_past, (int) slot.cache_tokens.size(), slot.id, pos_min, n_swa);
                     SLT_WRN(slot, "forcing full prompt re-processing due to lack of cache data (likely due to SWA, see %s)\n",
                             "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055");
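
Note: the following is a minimal standalone sketch (not the server code itself) of why the clamp matters. The helper functions and the main() driver are hypothetical, written only for illustration. The assumed semantics, based on the diff above, are: pos_min is the smallest sequence position still held in the KV cache (0 when nothing has been evicted), slot.n_past is the number of cached tokens the slot wants to reuse, and n_swa is the sliding-window size reported by llama_model_n_swa(). Without the clamp, slot.n_past - n_swa goes negative whenever the reused prefix is shorter than the window, so even a fully intact cache spuriously triggers a full reprocess.

#include <algorithm>
#include <cstdio>

// Hypothetical helpers for illustration; they mirror only the condition
// changed by this patch, not the surrounding server logic.

// Before the fix: the threshold n_past - n_swa can be negative, so
// pos_min == 0 (no cache data evicted yet) still satisfies the condition
// whenever n_past < n_swa, forcing an unnecessary full reprocess.
static bool needs_full_reprocess_old(int pos_min, int n_past, int n_swa) {
    return pos_min > n_past - n_swa;
}

// After the fix: clamping the threshold at 0 means an intact cache
// (pos_min == 0) never forces a full prompt re-processing.
static bool needs_full_reprocess_new(int pos_min, int n_past, int n_swa) {
    return pos_min > std::max(0, n_past - n_swa);
}

int main() {
    // Example values (assumed): reuse 100 cached tokens with a 512-token
    // sliding window, and the cache still holds everything from position 0.
    printf("old condition: %d\n", needs_full_reprocess_old(0, 100, 512)); // 1 (spurious reprocess)
    printf("new condition: %d\n", needs_full_reprocess_new(0, 100, 512)); // 0 (cache reused)
    return 0;
}

With the old condition, 0 > 100 - 512 evaluates against -412 and is true; with the clamp, it evaluates against max(0, -412) = 0 and is false, which is the behavior the patch restores for prompts shorter than the SWA window.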