From 7b8858415e8584ef00423cc22e652d4c30c3726f Mon Sep 17 00:00:00 2001
From: hx507 <72583014+hx507@users.noreply.github.com>
Date: Fri, 17 Mar 2023 05:11:49 +0800
Subject: [PATCH] Scale buf_size linearly with n_ctx

This appears to solve https://github.com/ggerganov/llama.cpp/issues/153,
where the error "ggml_new_tensor_impl: not enough space in the context's
memory pool" is thrown in interactive mode. At least, the out-of-memory
error comes from the `ctx0` used here, although I am not familiar enough
with the code base to tell whether this is indeed the cause.
---
 main.cpp | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/main.cpp b/main.cpp
index ca0fca8b36455..0c17b30354489 100644
--- a/main.cpp
+++ b/main.cpp
@@ -549,9 +549,7 @@ bool llama_eval(
 
     const int d_key = n_embd/n_head;
 
-    // TODO: check if this size scales with n_ctx linearly and remove constant. somehow I feel it wasn't the case
-    // static size_t buf_size = hparams.n_ctx*1024*1024;
-    static size_t buf_size = 512u*1024*1024;
+    static size_t buf_size = (size_t)hparams.n_ctx*1024*1024;
     static void * buf = malloc(buf_size);
 
     if (mem_per_token > 0 && mem_per_token*N > buf_size) {
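
For reference, a minimal standalone sketch of the sizing logic before and
after this patch. The n_ctx value of 512 below is an illustrative
assumption (the real value comes from hparams.n_ctx, read from the model
file at load time), and the 1 MiB-per-context-token factor is simply what
the patched expression implies, not a measured requirement:

    #include <cstdio>
    #include <cstdlib>

    int main() {
        // Illustrative stand-in for hparams.n_ctx; the real value is
        // read from the model hyperparameters at load time.
        const int n_ctx = 512;

        // Old behavior: a fixed 512 MiB pool, regardless of context length.
        size_t buf_size_old = 512u * 1024 * 1024;

        // New behavior: 1 MiB per context token. The (size_t) cast is
        // applied before the multiplication, so the product is computed
        // in size_t and cannot overflow a 32-bit int for large n_ctx.
        size_t buf_size_new = (size_t)n_ctx * 1024 * 1024;

        printf("old: %zu MiB, new: %zu MiB\n",
               buf_size_old >> 20, buf_size_new >> 20);

        // With n_ctx = 512 the two sizes match, so default configurations
        // are unaffected; with n_ctx = 2048 the new pool grows to 2048 MiB
        // instead of exhausting the fixed 512 MiB one in interactive mode.
        return 0;
    }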