mtmd : rename llava directory to mtmd (#13311)

* mv llava to mtmd

* change ref everywhere
commit 9b61acf060 (parent 5215b91e93)
Author: Xuan-Son Nguyen (committed by GitHub)
Date:   2025-05-05 16:02:55 +02:00
37 changed files with 44 additions and 44 deletions

@@ -2211,14 +2211,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_CONT_BATCHING"));
     add_opt(common_arg(
         {"--mmproj"}, "FILE",
-        "path to a multimodal projector file. see tools/llava/README.md",
+        "path to a multimodal projector file. see tools/mtmd/README.md",
         [](common_params & params, const std::string & value) {
             params.mmproj.path = value;
         }
     ).set_examples(mmproj_examples));
     add_opt(common_arg(
         {"--mmproj-url"}, "URL",
-        "URL to a multimodal projector file. see tools/llava/README.md",
+        "URL to a multimodal projector file. see tools/mtmd/README.md",
         [](common_params & params, const std::string & value) {
             params.mmproj.url = value;
         }
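
The hunk above only changes the README path in the help text; the `--mmproj` / `--mmproj-url` registration pattern itself is unchanged. As a rough illustration of that pattern, here is a self-contained sketch using made-up `params_sketch` / `arg_sketch` types (not the real `common_arg` / `add_opt` API from llama.cpp): an option bundles its flag, value name, help string, and a handler lambda that writes the parsed value into the params struct.

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-ins for llama.cpp's common_params / common_arg types;
// only the pieces needed to show the registration pattern are kept.
struct params_sketch {
    std::string mmproj_path;
};

struct arg_sketch {
    std::string flag;        // e.g. "--mmproj"
    std::string value_name;  // e.g. "FILE"
    std::string help;        // help text shown by the CLI
    std::function<void(params_sketch &, const std::string &)> handler;
};

int main() {
    std::vector<arg_sketch> opts;

    // Mirrors the add_opt(common_arg({"--mmproj"}, "FILE", ...)) call in the hunk:
    // the handler simply stores the parsed value into the params struct.
    opts.push_back({
        "--mmproj", "FILE",
        "path to a multimodal projector file. see tools/mtmd/README.md",
        [](params_sketch & params, const std::string & value) { params.mmproj_path = value; },
    });

    // Simulate parsing "--mmproj mmproj.gguf" (file name is made up).
    params_sketch params;
    for (const auto & opt : opts) {
        if (opt.flag == "--mmproj") {
            opt.handler(params, "mmproj.gguf");
        }
    }
    std::cout << "mmproj path: " << params.mmproj_path << "\n";
    return 0;
}
```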

@@ -340,7 +340,7 @@ struct common_params {
     common_conversation_mode conversation_mode = COMMON_CONVERSATION_MODE_AUTO;
-    // multimodal models (see tools/llava)
+    // multimodal models (see tools/mtmd)
     struct common_params_model mmproj;
     bool mmproj_use_gpu = true; // use GPU for multimodal model
     bool no_mmproj = false;     // explicitly disable multimodal model
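
The fields shown in this hunk (`mmproj`, `mmproj_use_gpu`, `no_mmproj`) are the only multimodal-related state the rename touches. As a hypothetical sketch of how such fields could be consumed (not code from the repository), one might gate multimodal support on a projector being provided and not explicitly disabled:

```cpp
#include <iostream>
#include <string>

// Simplified, hypothetical mirror of the fields shown in the hunk above;
// the real structs in llama.cpp contain many more members.
struct mmproj_model_sketch {
    std::string path; // local path to the projector file (set via --mmproj)
    std::string url;  // remote URL to the projector file (set via --mmproj-url)
};

struct mm_params_sketch {
    mmproj_model_sketch mmproj;
    bool mmproj_use_gpu = true; // use GPU for multimodal model
    bool no_mmproj = false;     // explicitly disable multimodal model
};

// Hypothetical helper: multimodal support is active only when a projector
// was given (by path or URL) and was not explicitly disabled.
static bool multimodal_enabled(const mm_params_sketch & p) {
    if (p.no_mmproj) {
        return false;
    }
    return !p.mmproj.path.empty() || !p.mmproj.url.empty();
}

int main() {
    mm_params_sketch p;
    p.mmproj.path = "mmproj.gguf"; // made-up file name
    std::cout << (multimodal_enabled(p) ? "multimodal on" : "multimodal off") << "\n";
    return 0;
}
```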