vocab : add special infill tokens for CodeLlama (#11850)
* vocab : add special infill tokens for CodeLlama The commit adds the following special tokens for CodeLlama infill: - `▁<PRE>` - `▁<SUF>` - `▁<MID>` The motivation for this is that currently the infill example uses CodeLlama as a suggested model. But when using this model the following error is generated: ```console /llama.cpp-debug/examples/infill/infill.cpp:165: GGML_ASSERT(llama_vocab_fim_pre(vocab) >= 0) failed Could not attach to process. If your uid matches the uid of the target process, check the setting of /proc/sys/kernel/yama/ptrace_scope, or try again as the root user. For more details, see /etc/sysctl.d/10-ptrace.conf ptrace: Operation not permitted. No stack. The program is not being run. 305251 Aborted (core dumped) ./build/bin/llama-infill -t 10 -ngl 0 -m models/codellama-13b.Q5_K_S.gguf \ -c 4096 --temp 0.7 --repeat_penalty 1.1 -n 20 \ --in-prefix "def helloworld():\n print(\"hell" \ --in-suffix "\n print(\"goodbye world\")\n " ``` * squash! vocab : add special infill tokens for CodeLlama Add _<EOT> as well.
This commit is contained in:
parent
250d7953e8
commit
c80a7759da
1 changed file with 5 additions and 0 deletions
|
@ -1807,6 +1807,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
|
||||||
|| t.first == "<end_of_turn>"
|
|| t.first == "<end_of_turn>"
|
||||||
|| t.first == "<|endoftext|>"
|
|| t.first == "<|endoftext|>"
|
||||||
|| t.first == "<EOT>"
|
|| t.first == "<EOT>"
|
||||||
|
|| t.first == "_<EOT>"
|
||||||
|| t.first == "<|end▁of▁sentence|>" // DeepSeek
|
|| t.first == "<|end▁of▁sentence|>" // DeepSeek
|
||||||
) {
|
) {
|
||||||
special_eot_id = t.second;
|
special_eot_id = t.second;
|
||||||
|
@ -1839,6 +1840,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
|
||||||
|| t.first == "<fim-prefix>"
|
|| t.first == "<fim-prefix>"
|
||||||
|| t.first == "<|fim▁begin|>" // DeepSeek
|
|| t.first == "<|fim▁begin|>" // DeepSeek
|
||||||
|| t.first == "<PRE>"
|
|| t.first == "<PRE>"
|
||||||
|
|| t.first == "▁<PRE>" // CodeLlama
|
||||||
) {
|
) {
|
||||||
special_fim_pre_id = t.second;
|
special_fim_pre_id = t.second;
|
||||||
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
|
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
|
||||||
|
@ -1856,6 +1858,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
|
||||||
|| t.first == "<fim-suffix>"
|
|| t.first == "<fim-suffix>"
|
||||||
|| t.first == "<|fim▁hole|>" // DeepSeek
|
|| t.first == "<|fim▁hole|>" // DeepSeek
|
||||||
|| t.first == "<SUF>"
|
|| t.first == "<SUF>"
|
||||||
|
|| t.first == "▁<SUF>" // CodeLlama
|
||||||
) {
|
) {
|
||||||
special_fim_suf_id = t.second;
|
special_fim_suf_id = t.second;
|
||||||
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
|
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
|
||||||
|
@ -1873,6 +1876,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
|
||||||
|| t.first == "<fim-middle>"
|
|| t.first == "<fim-middle>"
|
||||||
|| t.first == "<|fim▁end|>" // DeepSeek
|
|| t.first == "<|fim▁end|>" // DeepSeek
|
||||||
|| t.first == "<MID>"
|
|| t.first == "<MID>"
|
||||||
|
|| t.first == "▁<MID>" // CodeLlama
|
||||||
) {
|
) {
|
||||||
special_fim_mid_id = t.second;
|
special_fim_mid_id = t.second;
|
||||||
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
|
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
|
||||||
|
@ -1957,6 +1961,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
|
||||||
|| t.first == "<|endoftext|>"
|
|| t.first == "<|endoftext|>"
|
||||||
|| t.first == "<|eom_id|>"
|
|| t.first == "<|eom_id|>"
|
||||||
|| t.first == "<EOT>"
|
|| t.first == "<EOT>"
|
||||||
|
|| t.first == "_<EOT>"
|
||||||
) {
|
) {
|
||||||
special_eog_ids.insert(t.second);
|
special_eog_ids.insert(t.second);
|
||||||
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
|
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue