llama : print hint when loading a model when no backends are loaded (#13589)
This commit is contained in:
parent
7c07ac244d
commit
5364ae4ba5
1 changed files with 5 additions and 0 deletions
|
@@ -140,6 +140,11 @@ static struct llama_model * llama_model_load_from_file_impl(
             struct llama_model_params params) {
     ggml_time_init();
 
+    if (!params.vocab_only && ggml_backend_reg_count() == 0) {
+        LLAMA_LOG_ERROR("%s: no backends are loaded. hint: use ggml_backend_load() or ggml_backend_load_all() to load a backend before calling this function\n", __func__);
+        return nullptr;
+    }
+
     unsigned cur_percentage = 0;
     if (params.progress_callback == NULL) {
         params.progress_callback_user_data = &cur_percentage;
|
Loading…
Add table
Add a link
Reference in a new issue