ggml-opencl, llama: using reserve() if count already known (#7272)

Herman Semenov 2024-05-20 07:33:21 +00:00 committed by GitHub
parent 65c58207ec
commit 213e90ed73
2 changed files with 8 additions and 2 deletions


@@ -16162,6 +16162,7 @@ static bool llama_control_vector_init(struct llama_control_vector & cvec, const
     }
 
     // make tensors
+    cvec.tensors.reserve(model.hparams.n_layer);
     cvec.tensors.push_back(nullptr); // there's never a tensor for layer 0
     for (size_t il = 1; il < model.hparams.n_layer; il++) {
         struct ggml_context * ctx = ctx_map.at(model.buft_layer[il].buft);
@@ -16170,6 +16171,8 @@ static bool llama_control_vector_init(struct llama_control_vector & cvec, const
     }
 
     // allocate tensors / buffers and zero
+    cvec.ctxs.reserve(ctx_map.size());
+    cvec.bufs.reserve(ctx_map.size());
     for (auto it : ctx_map) {
         ggml_backend_buffer_type_t buft = it.first;
         ggml_context * ctx = it.second;
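
For context (not part of the diff): the change applies the usual std::vector pattern of calling reserve() before a push_back() loop when the final element count is already known, so the vector allocates its storage once instead of growing repeatedly. Below is a minimal standalone sketch of that pattern; the container, counts, and names are illustrative only, not taken from llama.cpp.

#include <cstdio>
#include <vector>

int main() {
    const size_t n_layer = 32; // illustrative count, known before the loop starts

    std::vector<int> tensors;
    tensors.reserve(n_layer);  // one allocation up front; size() remains 0

    for (size_t il = 0; il < n_layer; il++) {
        tensors.push_back(static_cast<int>(il)); // no reallocation while growing
    }

    printf("size = %zu, capacity = %zu\n", tensors.size(), tensors.capacity());
    return 0;
}

Note that reserve() only raises capacity; it does not change size(), so the subsequent push_back() calls are still required to populate the vector.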