From d5c6309d91cb22ebc947920f92eb686d92f84eae Mon Sep 17 00:00:00 2001
From: Csaba Kecskemeti
Date: Thu, 27 Mar 2025 03:11:23 -0700
Subject: [PATCH] convert : Support Qwen2_5_VLForConditionalGeneration (#12595)

---
 convert_hf_to_gguf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 52637c42..a06010a7 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -2269,7 +2269,7 @@ class Qwen2Model(Model):
             self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
 
 
-@Model.register("Qwen2VLForConditionalGeneration")
+@Model.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
 class Qwen2VLModel(Model):
     model_arch = gguf.MODEL_ARCH.QWEN2VL
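
Note: the patch adds the Qwen2.5 VL architecture string to the existing registration, so both Hugging Face architecture names resolve to the same converter class and GGUF architecture (QWEN2VL). The sketch below is a minimal, hypothetical illustration of how such a decorator-based registry could work; names like _model_classes and from_model_architecture are assumptions for illustration, not a claim about the actual convert_hf_to_gguf.py internals.

    # Illustrative sketch only: a decorator registry mapping Hugging Face
    # "architectures" strings to converter classes, so several architecture
    # names can share one converter.

    class Model:
        _model_classes: dict[str, type] = {}  # assumed registry storage

        @classmethod
        def register(cls, *names: str):
            # Decorator: record the decorated class under every given name.
            def wrapper(model_cls: type) -> type:
                for name in names:
                    cls._model_classes[name] = model_cls
                return model_cls
            return wrapper

        @classmethod
        def from_model_architecture(cls, arch: str) -> type:
            # Look up the converter class for an architecture string.
            try:
                return cls._model_classes[arch]
            except KeyError:
                raise NotImplementedError(f"Architecture {arch!r} not supported") from None


    @Model.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
    class Qwen2VLModel(Model):
        pass


    # Both architecture names now resolve to the same converter class:
    assert Model.from_model_architecture("Qwen2_5_VLForConditionalGeneration") is Qwen2VLModel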