Load all MoE experts during warmup (#11571)
* llama : introduce llama_set_warmup() API call that controls warmup mode; use all MoE experts during warmup

* common : use new API to enable warmup mode during model warmup

---------

Co-authored-by: Stanisław Szymczyk <sszymczy@gmail.com>
parent add2a3aa5a
commit 8fcb563613

6 changed files with 22 additions and 2 deletions
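In client terms, warmup mode is toggled around a throwaway decode. Below is a minimal sketch modeled on the warmup pass in common, using only public llama.h calls; the helper name and the token handling are assumptions for illustration, not part of this diff:

// sketch: enable warmup, run one throwaway decode so every weight (and, with
// this change, every MoE expert) is exercised once, then disable it again
#include "llama.h"

#include <vector>

static void warmup_model(llama_model * model, llama_context * ctx) {
    const llama_vocab * vocab = llama_model_get_vocab(model);

    llama_set_warmup(ctx, true); // MoE graphs now route through all experts

    std::vector<llama_token> tokens;
    if (llama_vocab_bos(vocab) != LLAMA_TOKEN_NULL) {
        tokens.push_back(llama_vocab_bos(vocab));
    }
    if (llama_vocab_eos(vocab) != LLAMA_TOKEN_NULL) {
        tokens.push_back(llama_vocab_eos(vocab));
    }
    if (tokens.empty()) {
        tokens.push_back(0); // models with neither token: any valid id works
    }

    llama_decode(ctx, llama_batch_get_one(tokens.data(), (int32_t) tokens.size()));
    llama_synchronize(ctx);

    llama_set_warmup(ctx, false); // back to normal expert routing
}

Real warmup code would additionally clear the KV cache and reset the perf counters afterwards so the throwaway pass does not leak into real inference. The hunks below are from the llama_context implementation; the remaining changed files, which presumably include the MoE routing change itself, were not captured in this excerpt.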
@@ -39,6 +39,7 @@ llama_context::llama_context(
     cparams.flash_attn   = params.flash_attn;
     cparams.no_perf      = params.no_perf;
     cparams.pooling_type = params.pooling_type;
+    cparams.warmup       = false;

     cparams.n_ctx          = params.n_ctx == 0            ? hparams.n_ctx_train         : params.n_ctx;
     cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
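Note that none of the hunks shown here implement the all-experts routing itself; that lives in one of the changed files not captured above. A plausible sketch of the gating, following the commit message and llama.cpp's usual hparams naming (n_expert, n_expert_used) — assumed, not quoted from the diff:

// sketch: inside the MoE FFN graph build, pick how many experts each token
// is routed to; during warmup, use all of them so every expert's weights
// participate in the compute graph at least once
const int64_t n_expert      = hparams.n_expert;
const int64_t n_expert_used = cparams.warmup ? hparams.n_expert : hparams.n_expert_used;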
@@ -948,6 +949,12 @@ void llama_context::set_causal_attn(bool value) {
     cparams.causal_attn = value;
 }

+void llama_context::set_warmup(bool value) {
+    LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value);
+
+    cparams.warmup = value;
+}
+
 void llama_context::set_adapter_lora(
         llama_adapter_lora * adapter,
         float scale) {
@@ -1594,7 +1601,7 @@ void llama_context::output_reorder() {
 //

 int32_t llama_context::graph_max_nodes() const {
-    return std::max<int32_t>(8192, 5*model.n_tensors());
+    return std::max<int32_t>(65536, 5*model.n_tensors());
 }

 ggml_cgraph * llama_context::graph_init() {
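With warmup able to activate every expert, the worst-case compute graph can be larger than before, which is presumably why the node-count floor rises from 8192 to 65536 here. A sketch of where that value ends up, assuming the standard ggml graph API used elsewhere in llama.cpp (the ctx_compute name is illustrative):

// sketch: graph_init() sizes the compute graph with graph_max_nodes(); the
// 65536 floor gives warmup graphs headroom beyond what 5*n_tensors() alone
// would allow for smaller models
ggml_cgraph * gf = ggml_new_graph_custom(ctx_compute, graph_max_nodes(), /*grads=*/false);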
@@ -2372,6 +2379,10 @@ void llama_set_causal_attn(llama_context * ctx, bool causal_attn) {
     ctx->set_causal_attn(causal_attn);
 }

+void llama_set_warmup(llama_context * ctx, bool warmup) {
+    ctx->set_warmup(warmup);
+}
+
 void llama_synchronize(llama_context * ctx) {
     ctx->synchronize();
 }