From 36667c8edcded08063ed51c7d57e9e086bbfc903 Mon Sep 17 00:00:00 2001
From: Xuan-Son Nguyen
Date: Sat, 3 May 2025 20:07:54 +0200
Subject: [PATCH] clip : revert the change of BOI/EOI token for GLM-edge (⚠️ breaking change) (#13259)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 tools/llava/clip-impl.h |  2 ++
 tools/llava/clip.cpp    | 26 +++++++++++++++++++-------
 tools/llava/mtmd.cpp    |  6 +-----
 3 files changed, 22 insertions(+), 12 deletions(-)

diff --git a/tools/llava/clip-impl.h b/tools/llava/clip-impl.h
index b575ca4d..b78d930b 100644
--- a/tools/llava/clip-impl.h
+++ b/tools/llava/clip-impl.h
@@ -75,6 +75,8 @@
 #define TN_MM_PROJECTOR     "mm.model.fc.weight" // idefics3
 #define TN_MM_PATCH_MERGER  "mm.patch_merger.weight" // mistral small 3.1
 #define TN_TOK_IMG_BREAK    "v.token_embd.img_break" // pixtral
+#define TN_TOK_GLM_BOI      "adapter.boi" // glm-edge (these embeddings are not in text model)
+#define TN_TOK_GLM_EOI      "adapter.eoi" // glm-edge (these embeddings are not in text model)
 
 // mimicpmv
 #define TN_MINICPMV_POS_EMBD_K "resampler.pos_embed_k"
diff --git a/tools/llava/clip.cpp b/tools/llava/clip.cpp
index 7607d4e3..3b60a526 100644
--- a/tools/llava/clip.cpp
+++ b/tools/llava/clip.cpp
@@ -249,9 +249,11 @@ struct clip_vision_model {
     struct ggml_tensor * mm_4_w = nullptr;
     struct ggml_tensor * mm_4_b = nullptr;
 
-    //GLMV-Edge projection
+    // GLMV-Edge projection
     struct ggml_tensor * mm_model_adapter_conv_w = nullptr;
     struct ggml_tensor * mm_model_adapter_conv_b = nullptr;
+    struct ggml_tensor * mm_glm_tok_boi = nullptr;
+    struct ggml_tensor * mm_glm_tok_eoi = nullptr;
 
     // MobileVLM projection
     struct ggml_tensor * mm_model_mlp_1_w = nullptr;
@@ -1559,6 +1561,13 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
             embeddings = ggml_mul(ctx0, embeddings,x);
             embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, embeddings);
         }
+        // arrangement of BOI/EOI token embeddings
+        // note: these embeddings are not present in text model, hence we cannot process them as text tokens
+        // see: https://huggingface.co/THUDM/glm-edge-v-2b/blob/main/siglip.py#L53
+        {
+            embeddings = ggml_concat(ctx0, model.mm_glm_tok_boi, embeddings, 1); // BOI
+            embeddings = ggml_concat(ctx0, embeddings, model.mm_glm_tok_eoi, 1); // EOI
+        }
     }
     else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL) {
@@ -1972,12 +1981,14 @@ struct clip_model_loader {
                 {
                     vision_model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
                     vision_model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
-                    vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR,"weight"));
-                    vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1,"weight"));
-                    vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1,"bias"));
-                    vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H,"weight"));
-                    vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE,"weight"));
-                    vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H,"weight"));
+                    vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR, "weight"));
+                    vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "weight"));
+                    vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "bias"));
+                    vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight"));
+                    vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE, "weight"));
+                    vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight"));
+                    vision_model.mm_glm_tok_boi = get_tensor(string_format(TN_TOK_GLM_BOI, "weight"));
+                    vision_model.mm_glm_tok_eoi = get_tensor(string_format(TN_TOK_GLM_EOI, "weight"));
                 } break;
             case PROJECTOR_TYPE_QWEN2VL:
             case PROJECTOR_TYPE_QWEN25VL:
@@ -2948,6 +2959,7 @@ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * im
 
     if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2 || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
         n_patches /= 4;
+        n_patches += 2; // for BOI and EOI token embeddings
     } else if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) {
         if (ctx->minicpmv_version == 2) {
             n_patches = 96;
diff --git a/tools/llava/mtmd.cpp b/tools/llava/mtmd.cpp
index d1d7530f..73abf2ad 100644
--- a/tools/llava/mtmd.cpp
+++ b/tools/llava/mtmd.cpp
@@ -189,11 +189,6 @@ int32_t mtmd_tokenize(mtmd_context * ctx,
         marker_modified = "<start_of_image>" + ctx->image_marker + "<end_of_image>";
         string_replace_all(prompt_modified, ctx->image_marker, marker_modified);
 
-    } else if (proj_type == PROJECTOR_TYPE_GLM_EDGE) {
-        // <|begin_of_image|> ... (image embeddings) ... <|end_of_image|>
-        marker_modified = "<|begin_of_image|>" + ctx->image_marker + "<|end_of_image|>";
-        string_replace_all(prompt_modified, ctx->image_marker, marker_modified);
-
     } else if (proj_type == PROJECTOR_TYPE_IDEFICS3) {
         // https://github.com/huggingface/transformers/blob/a42ba80fa520c784c8f11a973ca9034e5f859b79/src/transformers/models/idefics3/processing_idefics3.py#L192-L215
         marker_modified = "<fake_token_around_image><global-img>" + ctx->image_marker + "<fake_token_around_image>";
@@ -213,6 +208,7 @@ int32_t mtmd_tokenize(mtmd_context * ctx,
     }
 
     // llava-1.5, llava-1.6, Yi-VL, Yi-34B, granite: don't need to add prefix and suffix
+    // for glm-edge, BOI and EOI token's embeddings are not present in the text model
 
     std::vector<std::string> parts = string_split_str(prompt_modified, ctx->image_marker);
    output.clear();
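
For illustration only, the following is a minimal standalone C++ sketch (plain std::vector instead of the ggml tensors used in the patch; the helper name and sizes are hypothetical) of the behaviour this change implements: the projected image embeddings are wrapped with the learned BOI/EOI embedding rows inside the vision graph, so clip_n_output_tokens() reports n_patches / 4 + 2 output tokens for GLM-Edge.

// Sketch of the BOI/EOI wrapping done by the two ggml_concat calls above.
// Not the real implementation; all names here are illustrative.
#include <cstdio>
#include <vector>

using Embedding = std::vector<float>; // one embedding vector of size n_embd

static std::vector<Embedding> wrap_with_boi_eoi(const std::vector<Embedding> & image_embd,
                                                const Embedding & boi,
                                                const Embedding & eoi) {
    std::vector<Embedding> out;
    out.reserve(image_embd.size() + 2);
    out.push_back(boi);                                          // <|begin_of_image|>
    out.insert(out.end(), image_embd.begin(), image_embd.end()); // image embeddings
    out.push_back(eoi);                                          // <|end_of_image|>
    return out;
}

int main() {
    const int n_embd    = 4;    // toy embedding size
    const int n_patches = 1024; // e.g. a 32x32 patch grid from the ViT

    // GLM-Edge's adapter merges 2x2 patches (n_patches /= 4), then the
    // BOI/EOI rows add two more output tokens (n_patches += 2).
    const int n_img_tokens = n_patches / 4;
    std::vector<Embedding> image_embd(n_img_tokens, Embedding(n_embd, 0.5f));

    Embedding boi(n_embd, 1.0f), eoi(n_embd, -1.0f);
    std::vector<Embedding> wrapped = wrap_with_boi_eoi(image_embd, boi, eoi);

    std::printf("output tokens: %zu (expected %d)\n", wrapped.size(), n_img_tokens + 2);
    return 0;
}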