parent 797990c4bc
commit 8a1d206f1d
2 changed files with 5 additions and 2 deletions
@@ -13189,6 +13189,7 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params,
         case LLM_ARCH_JINA_BERT_V2:
         case LLM_ARCH_NOMIC_BERT:
         case LLM_ARCH_NOMIC_BERT_MOE:
+        case LLM_ARCH_WAVTOKENIZER_DEC:
             {
                 res = nullptr;
             } break;
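The added case is the WavTokenizer decoder (LLM_ARCH_WAVTOKENIZER_DEC), which the TTS example uses as its vocoder. It converts the whole sequence of audio codes in a single forward pass rather than token by token, so, like the encoder-only BERT variants already in this switch, it needs no KV cache and create_memory() now returns nullptr for it. A minimal sketch of what this grouping expresses, using a hypothetical helper name (is_cache_less_arch is not part of llama.cpp):

    // Hypothetical helper mirroring the switch above: architectures for which
    // create_memory() returns nullptr, i.e. no KV cache is ever allocated.
    static bool is_cache_less_arch(llm_arch arch) {
        switch (arch) {
            case LLM_ARCH_JINA_BERT_V2:
            case LLM_ARCH_NOMIC_BERT:
            case LLM_ARCH_NOMIC_BERT_MOE:
            case LLM_ARCH_WAVTOKENIZER_DEC: // added by this commit
                return true;
            default:
                return false;
        }
    }

A cache-less context has no state to shift or carry between calls, which is what the two follow-up changes in the TTS example below account for.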
@@ -579,6 +579,8 @@ int main(int argc, char ** argv) {
     params.model = params.vocoder.model;
     params.embedding = true;
+    params.ctx_shift = false; // silence warning
+    params.n_ubatch = params.n_batch;

     common_init_result llama_init_cts = common_init_from_params(params);
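These two additions adjust how the vocoder context is created. ctx_shift is disabled because context shifting only makes sense with a KV cache, which this model no longer has; leaving it on presumably triggers the warning that the comment refers to. n_ubatch is raised to n_batch because an embedding-style (non-causal) evaluation has to fit the entire input into one physical micro-batch; with the default n_ubatch a long code sequence could not be processed in a single call. A rough sketch of the batch this sizing has to accommodate, assuming the example's usual includes (llama.h, common.h) and with illustrative variable names rather than the ones in the file:

    // All audio codes go into a single llama_batch; if n_ubatch were smaller
    // than n_codes, the one-shot llama_encode() call further down could not
    // process them as one physical batch.
    std::vector<llama_token> codes = get_audio_codes(); // hypothetical helper
    const int n_codes = (int) codes.size();

    llama_batch batch = llama_batch_init(n_codes, 0, 1);
    for (int i = 0; i < n_codes; ++i) {
        common_batch_add(batch, codes[i], i, { 0 }, true); // request output at every position
    }
    GGML_ASSERT(batch.n_tokens == n_codes);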
@@ -1020,8 +1022,8 @@ lovely<|t_0.56|><|code_start|><|634|><|596|><|1766|><|1556|><|1306|><|1285|><|14
     }
     GGML_ASSERT(batch.n_tokens == n_codes);

-    if (llama_decode(ctx_cts, batch) != 0) {
-        LOG_ERR("%s: llama_decode() failed\n", __func__);
+    if (llama_encode(ctx_cts, batch) != 0) {
+        LOG_ERR("%s: llama_encode() failed\n", __func__);
         return 1;
     }
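Because the WavTokenizer context is now created without any memory module, the call is switched from llama_decode(), which goes through the KV-cache decode path, to llama_encode(), which evaluates the batch statelessly. The result is then read back as embeddings rather than logits (params.embedding is set to true in the earlier hunk). A hedged sketch of what follows a successful call, assuming ctx_cts and model_cts are the vocoder context and model from the example and that the llama.h embedding accessors are as I recall them:

    // The vocoder output is exposed as per-token embeddings:
    // n_codes rows of n_embd floats each, later converted to audio samples.
    const int     n_embd = llama_model_n_embd(model_cts);
    const float * embd   = llama_get_embeddings(ctx_cts);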