Commit 6496143
make github CI happy
zhouwg committed Apr 21, 2024
1 parent 9cba545 · commit 6496143
Showing 2 changed files with 10 additions and 3 deletions.
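Every hunk below makes the same mechanical change: this fork's `ggml_init_params` struct gained a fourth member, `use_hwaccel`, and each initializer now sets it to `false` explicitly, presumably so CI builds with `-Wextra`/`-Wmissing-field-initializers` stay warning-free. A minimal compilable sketch of the assumed layout follows (the real definition lives in this fork's ggml.h; the field comments are my reading, not the author's):

/* sketch.c: assumed layout of ggml_init_params in this fork.
 * Upstream ggml has only the first three members; this fork's
 * hardware-acceleration work appends use_hwaccel. */
#include <stdbool.h>
#include <stddef.h>

struct ggml_init_params {
    size_t mem_size;    /* bytes to reserve for the context            */
    void * mem_buffer;  /* caller-provided buffer, or NULL to allocate */
    bool   no_alloc;    /* true: create tensor metadata only, no data  */
    bool   use_hwaccel; /* new in this fork: request a hw-accel path   */
};

int main(void) {
    /* Omitting use_hwaccel would still zero-initialize it in C, but a
     * strict CI warning set flags the missing field, hence the commit
     * spells out `false` at every call site. */
    struct ggml_init_params params = {
        /* .mem_size   =*/ 0,
        /* .mem_buffer =*/ NULL,
        /* .no_alloc   =*/ false,
        /* .use_hwaccel=*/ false,
    };
    (void) params;
    return 0;
}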
ggml-backend.c (6 changes: 4 additions & 2 deletions)
@@ -1238,7 +1238,8 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
     struct ggml_init_params params = {
         /* .mem_size   = */ sizeof(sched->context_buffer),
         /* .mem_buffer = */ sched->context_buffer,
-        /* .no_alloc   = */ true
+        /* .no_alloc   = */ true,
+        /* .use_hwaccel =*/ false
     };

     ggml_free(sched->ctx);
@@ -1980,7 +1981,8 @@ struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, s
     struct ggml_init_params params = {
         /* .mem_size   = */ ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false),
         /* .mem_buffer = */ NULL,
-        /* .no_alloc   = */ true
+        /* .no_alloc   = */ true,
+        /* .use_hwaccel =*/ false
     };

     struct ggml_context * ctx_allocated = ggml_init(params);
llama.cpp (7 changes: 6 additions & 1 deletion)
@@ -2378,6 +2378,7 @@ static bool llama_kv_cache_init(
         /*.mem_size   =*/ 2u*n_layers*ggml_tensor_overhead(),
         /*.mem_buffer =*/ NULL,
         /*.no_alloc   =*/ true,
+        /*.use_hwaccel=*/ false
     };
     ggml_context * ctx = ggml_init(params);
     if (!ctx) {
@@ -4664,6 +4665,7 @@ static bool llm_load_tensors(
         /*.mem_size   =*/ ctx_size,
         /*.mem_buffer =*/ NULL,
         /*.no_alloc   =*/ true,
+        /*.use_hwaccel=*/ false
     };
     ggml_context * ctx = ggml_init(params);
     if (!ctx) {
@@ -6535,6 +6537,7 @@ struct llm_build_context {
         /*.mem_size   =*/ buf_compute_meta.size(),
         /*.mem_buffer =*/ buf_compute_meta.data(),
         /*.no_alloc   =*/ true,
+        /*.use_hwaccel=*/ false
     };

     ctx0 = ggml_init(params);
@@ -14679,6 +14682,7 @@ static int llama_apply_lora_from_file_internal(
         /* .mem_size   */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
         /* .mem_buffer */ nullptr,
         /* .no_alloc   */ true,
+        /* .use_hwaccel*/ false
     };
     ggml_context * lora_ctx = ggml_init(lora_init_params);
     if (lora_ctx == nullptr) {
@@ -14929,7 +14933,7 @@ void llama_backend_init(void) {

     // needed to initialize f16 tables
     {
-        struct ggml_init_params params = { 0, NULL, false };
+        struct ggml_init_params params = { 0, NULL, false, false };
         struct ggml_context * ctx = ggml_init(params);
         ggml_free(ctx);
     }
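The `llama_backend_init` hunk above is the one call site that initializes the struct positionally rather than with field-name comments, so it needs a literal fourth `false`. A short sketch of the two equivalent spellings, reusing the four-field layout assumed earlier:

/* Positional: values must follow declaration order, so the commit
 * appends a fourth `false` for use_hwaccel. */
struct ggml_init_params a = { 0, NULL, false, false };

/* C99 designated initializers say the same thing by name; trailing
 * fields left unnamed would be zero-initialized, but spelling them
 * all out sidesteps missing-field-initializer warnings. */
struct ggml_init_params b = {
    .mem_size    = 0,
    .mem_buffer  = NULL,
    .no_alloc    = false,
    .use_hwaccel = false,
};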
@@ -15540,6 +15544,7 @@ static bool llama_control_vector_init(struct llama_control_vector & cvec, const
         /*.mem_size   =*/ n_layers * ggml_tensor_overhead(),
         /*.mem_buffer =*/ NULL,
         /*.no_alloc   =*/ true,
+        /*.use_hwaccel=*/ false
     };
     ggml_context * ctx = ggml_init(params);
     if (!ctx) {
