Add --no-op-offload to improve -ot pp perf in MoE models like llama4 400B (#13386)

This commit is contained in:
David Huang 2025-05-11 20:18:39 +08:00 committed by GitHub
parent 3eac209319
commit 7f323a589f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 57 additions and 9 deletions

View file

@@ -30,6 +30,7 @@ struct llama_cparams {
bool flash_attn;
bool no_perf;
bool warmup;
bool op_offload;
enum llama_pooling_type pooling_type;