llama : auto-batch preparation (#13845)
* llama : auto-batch

ggml-ci

* context : simplify if branching
parent 51fa76f172
commit 3f55f781f1
5 changed files with 67 additions and 54 deletions
@@ -392,7 +392,7 @@ int main(int argc, char ** argv) {
                 return 1;
             }
 
-            LOG_ERR("%s : failed to decode the batch, retrying with n_batch = %d\n", __func__, n_batch / 2);
+            LOG_WRN("%s : failed to decode the batch, retrying with n_batch = %d\n", __func__, n_batch / 2);
 
             n_cache_miss += 1;
 
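For context, the changed log call sits inside a caller-side retry loop: when llama_decode cannot find a free KV-cache slot for the batch it returns a positive value, and the example halves n_batch and retries the same chunk, so the message is now logged as a warning rather than an error. Below is a minimal sketch of that pattern, assuming llama.h from llama.cpp and an already prepared llama_context; decode_with_retry and the plain fprintf calls are illustrative stand-ins, not the example's actual helpers or LOG_* macros.

// Sketch: decode `tokens` in chunks of n_batch, halving the batch size when
// llama_decode reports that no KV-cache slot could be found (positive return).
// Assumes llama.h; fprintf stands in for the examples' LOG_* macros.
#include "llama.h"

#include <algorithm>
#include <cstdio>
#include <vector>

static bool decode_with_retry(llama_context * ctx, std::vector<llama_token> & tokens, int n_batch) {
    for (int i = 0; i < (int) tokens.size(); ) {
        const int n_eval = std::min(n_batch, (int) tokens.size() - i);

        llama_batch batch = llama_batch_get_one(tokens.data() + i, n_eval);

        const int ret = llama_decode(ctx, batch);
        if (ret == 0) {
            i += n_eval; // chunk decoded - advance to the next one
            continue;
        }

        if (n_batch == 1 || ret < 0) {
            // even a single token does not fit (KV cache full) or a hard error occurred
            fprintf(stderr, "%s : failed to decode the batch, n_batch = %d, ret = %d\n", __func__, n_batch, ret);
            return false;
        }

        // recoverable: retry the same chunk with half the batch size,
        // which is why the diff demotes this message from LOG_ERR to LOG_WRN
        fprintf(stderr, "%s : failed to decode the batch, retrying with n_batch = %d\n", __func__, n_batch / 2);
        n_batch /= 2;
    }

    return true;
}

Halving n_batch trades decode throughput for a chance to fit the pending tokens into the remaining KV-cache space, so the condition is recoverable rather than fatal.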