From 5364ae4ba53cc6367b8c8bf78876839122ca4e57 Mon Sep 17 00:00:00 2001
From: Diego Devesa
Date: Fri, 16 May 2025 07:38:07 -0700
Subject: [PATCH] llama : print hint when loading a model when no backends are
 loaded (#13589)

---
 src/llama.cpp | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/llama.cpp b/src/llama.cpp
index 9fdddf7b..2f06e0f8 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -140,6 +140,11 @@ static struct llama_model * llama_model_load_from_file_impl(
         struct llama_model_params params) {
     ggml_time_init();
 
+    if (!params.vocab_only && ggml_backend_reg_count() == 0) {
+        LLAMA_LOG_ERROR("%s: no backends are loaded. hint: use ggml_backend_load() or ggml_backend_load_all() to load a backend before calling this function\n", __func__);
+        return nullptr;
+    }
+
     unsigned cur_percentage = 0;
     if (params.progress_callback == NULL) {
         params.progress_callback_user_data = &cur_percentage;
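
Note (not part of the patch): a minimal caller-side sketch of what the new
hint asks for, assuming the public llama.h / ggml-backend.h C API
(ggml_backend_load_all(), llama_model_default_params(),
llama_model_load_from_file(), llama_model_free()):

    #include <cstdio>

    #include "llama.h"
    #include "ggml-backend.h"

    int main(int argc, char ** argv) {
        if (argc < 2) {
            fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
            return 1;
        }

        // load all available backend modules first; without this (or an
        // explicit ggml_backend_load()), ggml_backend_reg_count() is 0 and
        // the patched loader now fails early with the hint above instead
        // of attempting to load the model with no backends
        ggml_backend_load_all();

        struct llama_model_params mparams = llama_model_default_params();
        struct llama_model * model = llama_model_load_from_file(argv[1], mparams);
        if (model == NULL) {
            fprintf(stderr, "failed to load model: %s\n", argv[1]);
            return 1;
        }

        llama_model_free(model);
        return 0;
    }

This mainly matters when ggml is built with dynamic backend loading
(the GGML_BACKEND_DL build option); in static builds the compiled-in
backends are typically registered automatically.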