Load all MoE experts during warmup (#11571)
* llama : introduce llama_set_warmup() API call that controls warmup mode; use all MoE experts during warmup
* common : use new API to enable warmup mode during model warmup

Co-authored-by: Stanisław Szymczyk <sszymczy@gmail.com>
parent add2a3aa5a
commit 8fcb563613

6 changed files with 22 additions and 2 deletions
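For context on how the new call is meant to be used, here is a minimal sketch, assuming the llama_set_warmup(ctx, bool) entry point named in the commit message; the surrounding decode logic is illustrative only, not the verbatim common-code change from this commit.

```cpp
#include "llama.h"
#include <vector>

// Minimal warmup sketch: toggle warmup mode around a single dummy decode so
// that, with this change, MoE graphs touch all experts' weights once.
static void warmup(llama_context * ctx, const llama_model * model) {
    const llama_vocab * vocab = llama_model_get_vocab(model);

    llama_set_warmup(ctx, true);   // assumption: warmup mode on -> all MoE experts used

    llama_token bos = llama_vocab_bos(vocab);
    std::vector<llama_token> tmp = { bos != LLAMA_TOKEN_NULL ? bos : 0 };

    llama_decode(ctx, llama_batch_get_one(tmp.data(), (int32_t) tmp.size()));
    llama_synchronize(ctx);

    llama_set_warmup(ctx, false);  // back to normal top-k expert routing
}
```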
```diff
@@ -29,6 +29,7 @@ struct llama_cparams {
     bool offload_kqv;
     bool flash_attn;
     bool no_perf;
+    bool warmup;
 
     enum llama_pooling_type pooling_type;
```
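The hunk above only adds the flag; the sketch below hedges at how it is intended to affect graph construction, assuming the usual hparams names n_expert / n_expert_used. The helper is hypothetical, not code from the patch.

```cpp
// Hypothetical helper illustrating the intent of cparams.warmup: while the
// flag is set, the MoE feed-forward selects every expert instead of the
// model's configured top-k, so every expert tensor is loaded during warmup.
static uint32_t moe_experts_to_use(const llama_cparams & cparams,
                                   uint32_t n_expert,        // total experts in the model
                                   uint32_t n_expert_used) { // experts routed per token normally
    return cparams.warmup ? n_expert : n_expert_used;
}
```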