Skip to content

Commit 9ba2df7

Browse files
committed
revert: can't fix that bug locally
1 parent 9a17ac8 commit 9ba2df7

File tree

2 files changed

+2
-10
lines changed

2 files changed

+2
-10
lines changed

kani/engines/llamacpp/base.py

+1-9
Original file line numberDiff line numberDiff line change
@@ -65,18 +65,10 @@ def __init__(
6565
if match := re.match(r"(.*?)-(\d+)-of-(\d+)\.gguf", filename):
6666
log.info("Sharded GGUF file given - ensuring that all GGUF shards are downloaded")
6767
additional_files = []
68-
# there is a bug in llama-cpp-python that makes the additional_files inherit the subfolder of the parent
69-
*subfolders, basename = match[1].split("/")
70-
if subfolders:
71-
log.warning(
72-
"llama-cpp-python can fail to find additional model files in subfolders. If you see a 404 error,"
73-
' try manually supplying `model_load_kwargs={"additional_files": [...]}` or use huggingface-cli to'
74-
" download model files."
75-
)
7668
for n in range(1, int(match[3]) + 1):
7769
if n == int(match[2]):
7870
continue
79-
additional_files.append(f"{basename}-*{n}-of-{match[3]}.gguf")
71+
additional_files.append(f"{match[1]}-*{n}-of-{match[3]}.gguf")
8072
log.info(f"additional_files={additional_files}")
8173
model_load_kwargs.setdefault("additional_files", additional_files)
8274

sandbox/r1-quant.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
repo_id="unsloth/DeepSeek-R1-GGUF",
2727
filename="DeepSeek-R1-Q2_K_XS/DeepSeek-R1-Q2_K_XS-00001-of-00005.gguf",
2828
prompt_pipeline=pipeline,
29-
model_load_kwargs={"n_gpu_layers": -1},
29+
model_load_kwargs={"n_gpu_layers": -1, "additional_files": []},
3030
)
3131

3232

0 commit comments

Comments (0)