#!/bin/bash
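# Build the llama.cpp command-line tools needed for GGUF quantization
# (llama-quantize, llama-gguf-split, llama-imatrix), then start the Python app.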
if [ ! -d "llama.cpp" ]; then
    # clone llama.cpp only if it is not already present (e.g. in a dev environment where it persists between runs)
    git clone https://github.com/ggerganov/llama.cpp
fi
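# Default to a CPU-only build; CUDA is enabled below unless RUN_LOCALLY is set.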
export GGML_CUDA=OFF
if [[ -z "${RUN_LOCALLY}" ]]; then
    # enable CUDA if NOT running locally
    export GGML_CUDA=ON
fi
cd llama.cpp
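# Configure a static build (no shared libraries); GGML_CUDA carries the choice made above.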
cmake -B build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=${GGML_CUDA}
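# Compile only the three needed tools, in Release mode and in parallel.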
cmake --build build --config Release -j --target llama-quantize llama-gguf-split llama-imatrix
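# Copy the resulting binaries into the llama.cpp root and remove the intermediate build tree.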
cp ./build/bin/llama-* .
rm -rf build
cd ..
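# Start the Space's entry point, which is assumed to invoke the llama-* binaries built above.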
python app.py