From 02082f1519565fc7b49de211b28bc5404a69209b Mon Sep 17 00:00:00 2001
From: Ivy233 <952254420@qq.com>
Date: Wed, 26 Mar 2025 22:06:04 +0800
Subject: [PATCH] clip: Fix llama-llava-clip-quantize-cli quantization error
 under CUDA backend (#12566)

* [Fix] When clip-quantize-cli is compiled and run in a CUDA environment,
  ggml_fp16_to_fp32 fails because it tries to read tensor data that lives
  in video memory; quantization has to run on the CPU backend. After this
  fix, quantization automatically runs on the CPU backend and is no longer
  bound to CUDA.

* [Fix] Roll back the signature and implementation of clip_model_load, and
  switch the call in clip_model_quantize to clip_init.
---
 examples/llava/clip.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index a1f050e3..58ee5cf0 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -2989,7 +2989,10 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
     assert(itype < GGML_TYPE_COUNT);
     ggml_type type = static_cast<ggml_type>(itype);
 
-    auto * ctx_clip = clip_model_load(fname_inp, 2);
+    auto * ctx_clip = clip_init(fname_inp, clip_context_params{
+        /* use_gpu */   false,
+        /* verbosity */ 2,
+    });
 
     const auto & ctx_src = ctx_clip->ctx_gguf;
     const auto & ctx_data = ctx_clip->ctx_data;
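
For reference, a minimal sketch of loading a CLIP model through the clip_init
path with the GPU disabled, as the patched clip_model_quantize now does. This
assumes the clip.h API from examples/llava in the llama.cpp tree at the time
of this patch (a clip_context_params struct with use_gpu/verbosity fields and
clip_free for cleanup); it is an illustration, not part of the change:

// Sketch: load a CLIP model on the CPU backend only. With use_gpu = false,
// all tensors stay in host memory, so ggml_fp16_to_fp32 never dereferences
// a CUDA device pointer during quantization.
#include "clip.h"

#include <cstdio>

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <mmproj-model.gguf>\n", argv[0]);
        return 1;
    }

    clip_context_params params{
        /* use_gpu */   false, // keep tensor data CPU-addressable
        /* verbosity */ 2,
    };

    clip_ctx * ctx = clip_init(argv[1], params);
    if (ctx == nullptr) {
        fprintf(stderr, "failed to load %s\n", argv[1]);
        return 1;
    }

    // ... quantize or otherwise process the model here ...

    clip_free(ctx);
    return 0;
}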