llama : add option to override model tensor buffers (#11397)

* llama : add option to override tensor buffers

* ggml : fix possible underflow in ggml_nbytes
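
For context, the override option is exposed through the model parameters as a list of (tensor-name pattern, backend buffer type) pairs. Below is a minimal sketch of how a caller might pin the MoE expert tensors to host memory while offloading the rest; the field name tensor_buft_overrides, the struct name llama_model_tensor_buft_override, the regex pattern, and the null-terminated array convention are assumptions based on later llama.cpp headers, not taken from this diff.

// Sketch only: names below (tensor_buft_overrides, llama_model_tensor_buft_override)
// are assumptions based on later llama.cpp headers, not shown in this commit.
#include "llama.h"
#include "ggml-backend.h" // for ggml_backend_cpu_buffer_type(); header location may vary by version

llama_model * load_with_overrides(const char * path_model) {
    // Keep tensors whose names match the pattern in host (CPU) memory while the
    // remaining tensors follow the usual offload rules.
    static const llama_model_tensor_buft_override overrides[] = {
        { "\\.ffn_.*_exps\\.", ggml_backend_cpu_buffer_type() },
        { nullptr,             nullptr                        }, // assumed end-of-list terminator
    };

    llama_model_params params    = llama_model_default_params();
    params.n_gpu_layers          = 99;        // offload everything that is not overridden
    params.tensor_buft_overrides = overrides; // new field added by this change (name assumed)
    return llama_model_load_from_file(path_model, params);
}

On the command line this surfaces as a pattern=buffer-type style flag (--override-tensor / -ot in current llama.cpp builds); the exact syntax is not shown in this excerpt.

The second bullet addresses ggml_nbytes(), which accumulates terms of the form (ne[i] - 1)*nb[i]; for a tensor with a zero-sized dimension that subtraction wraps around in unsigned arithmetic and yields a huge byte count. The fix presumably guards against ne[i] <= 0 before the accumulation, though the exact change is not included in the hunk shown below.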
Diego Devesa · 2025-04-02 14:52:01 +02:00 · committed by GitHub
parent a10b36c91a
commit e0e912f49b
12 changed files with 108 additions and 9 deletions


@@ -527,7 +527,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
}
std::vector<std::string> splits = {};
-llama_model_loader ml(fname_inp, splits, use_mmap, /*check_tensors*/ true, kv_overrides);
+llama_model_loader ml(fname_inp, splits, use_mmap, /*check_tensors*/ true, kv_overrides, nullptr);
ml.init_mappings(false); // no prefetching
llama_model model(llama_model_default_params());
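
In the hunk above, the quantization path passes nullptr for the loader's new trailing argument, since no tensor buffer overrides apply when re-quantizing a model on the host. The call site implies a constructor roughly like the following; parameter names and the override struct name are assumptions, not taken from this diff.

// Assumed shape of the updated llama_model_loader constructor; only the trailing
// overrides parameter is new here, the earlier parameters match the call site above.
llama_model_loader(
        const std::string & fname,
        std::vector<std::string> & splits,
        bool use_mmap,
        bool check_tensors,
        const llama_model_kv_override * param_overrides_p,
        const llama_model_tensor_buft_override * param_tensor_buft_overrides_p);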