main : print total token count and tokens consumed so far (#4874)

* Token count changes

* Add show token count

* Updating before PR

* Two requested changes

* Move param def posn
pudepiedj 2024-01-11 16:14:52 +00:00 committed by GitHub
parent 2f043328e3
commit 43f76bf1c3
4 changed files with 15 additions and 3 deletions

@@ -64,6 +64,7 @@ struct gpt_params {
     int32_t n_beams        = 0;   // if non-zero then use beam search of given width.
     int32_t grp_attn_n     = 1;   // group-attention factor
     int32_t grp_attn_w     = 512; // group-attention width
+    int32_t token_interval = 512; // show token count every 512 tokens
     float rope_freq_base  = 0.0f; // RoPE base frequency
     float rope_freq_scale = 0.0f; // RoPE frequency scaling factor
     float yarn_ext_factor = -1.0f; // YaRN extrapolation mix factor
@@ -242,4 +243,3 @@ void dump_kv_cache_view(const llama_kv_cache_view & view, int row_size = 80);
 // Dump the KV cache view showing individual sequences in each cell (long output).
 void dump_kv_cache_view_seqs(const llama_kv_cache_view & view, int row_size = 40);
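
For reference, a minimal, self-contained sketch of how a setting like the new gpt_params::token_interval field could gate a periodic token-count report during generation. The loop, n_past, and n_ctx below are illustrative stand-ins; this is not the actual main.cpp change from this commit.

#include <cstdio>

// Illustrative only: print a progress line every token_interval tokens,
// in the spirit of the token_interval default added above.
int main() {
    const int token_interval = 512;  // mirrors the default added to gpt_params
    const int n_ctx          = 4096; // assumed context size for this sketch

    for (int n_past = 1; n_past <= n_ctx; ++n_past) {
        // ... token generation would happen here ...
        if (token_interval > 0 && n_past % token_interval == 0) {
            printf("Tokens consumed so far = %d / %d\n", n_past, n_ctx);
        }
    }
    return 0;
}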