Skip to content

Commit

Permalink
Update unraid template
Browse files — browse the repository at this point in the history
  • Loading branch information
edgar971 committed Aug 22, 2023
1 parent bffe7d0 commit df18d4a
Showing 1 changed file with 6 additions and 7 deletions.
13 changes: 6 additions & 7 deletions — unraid-template/unraid.xml
Original file line number · Diff line number · Diff line change
Expand Up @@ -18,15 +18,14 @@
<ExtraParams>--gpus all</ExtraParams>
<PostArgs/>
<CPUset/>
<DateInstalled>1692587146</DateInstalled>
<DateInstalled>1692670577</DateInstalled>
<DonateText/>
<DonateLink/>
<Requires/>
<Config Name="Local Model Path" Target="MODEL" Default="/models/llama-2-13b-chat.bin" Mode="" Description="The local model path" Type="Variable" Display="always" Required="true" Mask="false">/models/llama-2-7b-chat.bin</Config>
<Config Name="Default Model UI" Target="DEFAULT_MODEL" Default="/models/llama-2-13b-chat.bin" Mode="" Description="Should be same as $MODEL" Type="Variable" Display="always" Required="true" Mask="false">/models/llama-2-7b-chat.bin</Config>
<Config Name="Model Download URL" Target="MODEL_DOWNLOAD_URL" Default="https://huggingface.co/TheBloke/Nous-Hermes-Llama2-GGML/resolve/main/nous-hermes-llama2-13b.ggmlv3.q4_0.bin" Mode="" Description="GGML Model Binary. " Type="Variable" Display="always" Required="true" Mask="false">https://huggingface.co/TheBloke/Nous-Hermes-Llama-2-7B-GGML/resolve/main/nous-hermes-llama-2-7b.ggmlv3.q4_0.bin</Config>
<Config Name="Model Directory" Target="/models" Default="/mnt/user/appdata/models" Mode="rw" Description="The local model directory to use as a cache" Type="Path" Display="always" Required="true" Mask="false">/mnt/user/appdata/models</Config>
<Config Name="Web UI" Target="3000" Default="3000" Mode="tcp" Description="Chat UI Port" Type="Port" Display="always" Required="false" Mask="false">3005</Config>
<Config Name="API Port" Target="8000" Default="8000" Mode="tcp" Description="HTTP API Port" Type="Port" Display="always" Required="false" Mask="false">8008</Config>
<Config Name="Local Model Path" Target="MODEL" Default="/models/llama-2-13b-chat.bin" Mode="" Description="The local model path" Type="Variable" Display="always" Required="true" Mask="false"/>
<Config Name="Model Download URL" Target="MODEL_DOWNLOAD_URL" Default="https://huggingface.co/TheBloke/Nous-Hermes-Llama2-GGML/resolve/main/nous-hermes-llama2-13b.ggmlv3.q4_0.bin" Mode="" Description="GGML Model Binary. " Type="Variable" Display="always" Required="false" Mask="false"/>
<Config Name="Model Directory" Target="/models" Default="/mnt/user/appdata/models" Mode="rw" Description="The local model directory to use as a cache" Type="Path" Display="always" Required="false" Mask="false"/>
<Config Name="Web UI" Target="3000" Default="3000" Mode="tcp" Description="Chat UI Port" Type="Port" Display="always" Required="false" Mask="false"/>
<Config Name="API Port" Target="8000" Default="8000" Mode="tcp" Description="HTTP API Port" Type="Port" Display="always" Required="false" Mask="false"/>
<Config Name="Number Of GPU Layers" Target="N_GPU_LAYERS" Default="" Mode="" Description="Layers to offload to GPU" Type="Variable" Display="advanced" Required="false" Mask="false">64</Config>
</Container>

0 comments on commit df18d4a

Please sign in to comment.