---
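# config.yml: settings for what appears to be a llama-cpp-python based chat demo
# (an assumption inferred from the llama_cpp, chat, and queue sections below).
# The hub section names the GGUF weights to pull from the Hugging Face Hub.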
hub:
  repo_id: Nekochu/Llama-2-13B-fp16-french
  filename: llama-2-13b-chat-FR-Q4_K_M.gguf
llama_cpp:
  n_ctx: 4096
  # n_gpu_layers: 40  # Llama 13B has 40 layers
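  # Uncommenting n_gpu_layers offloads that many transformer layers to the GPU
  # (a llama.cpp setting); leaving it commented out keeps inference on the CPU.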
chat:
  stop:
    - "</s>"
    - "<unk>"
    - "### USER:"
    - "USER:"
queue:
  max_size: 16
  concurrency_count: 1  # leave this at 1; llama-cpp-python doesn't handle concurrent requests, and raising it will crash the entire app