orca_mini_v3_13B-GGML / start_server.sh
limcheekin's picture
feat: first import
73cc25e
raw
history blame
117 Bytes
#!/bin/sh
# Launch the llama-cpp-python OpenAI-compatible server for the GGML model.
#
# Environment:
#   MODEL - path to the GGML model file
#           (default: model/ggml-model-q4_0.bin, the original hard-coded path)
set -eu

# Raise the locked-memory limit so llama.cpp can mlock() the model pages.
# Best-effort: unprivileged containers often deny this; warn instead of dying.
if ! ulimit -l unlimited 2>/dev/null; then
  printf 'warn: could not raise memlock limit; --mlock may fail\n' >&2
fi

# exec replaces the shell so the server receives signals directly
# (important when this script is a container entrypoint / PID 1).
exec python3 -B -m llama_cpp.server --model "${MODEL:-model/ggml-model-q4_0.bin}"