File size: 117 Bytes
Commit: 73cc25e
#!/bin/sh
# Raise the locked-memory limit so mlock can pin the model in RAM
ulimit -l unlimited
# -B skips writing .pyc files; serve the quantized GGML model via llama_cpp.server
python3 -B -m llama_cpp.server --model model/ggml-model-q4_0.bin
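Once this script is running, llama_cpp.server exposes an OpenAI-compatible HTTP API. A minimal smoke test is sketched below; it assumes the server's defaults (localhost, port 8000, the /v1/completions endpoint), none of which are configured by this script.

#!/bin/sh
# Hypothetical check: ask the locally running server for a short completion.
# Host, port, and endpoint are llama-cpp-python defaults, assumed here.
curl -s http://localhost:8000/v1/completions \
  -H "Content-Type: application/json" \
  -d '{"prompt": "Hello", "max_tokens": 16}'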