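# Dockerfile for a Hugging Face Space: builds llama.cpp from source and serves
# a quantized Mistral-7B-Instruct GGUF model over HTTP via llama-server.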
FROM python:3.12
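# Create an unprivileged user (UID 1000, the UID Hugging Face Spaces expects)
# and work out of its home directory.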
RUN useradd -m -u 1000 app
WORKDIR /home/app
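# Install the toolchain needed to fetch and compile llama.cpp.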
RUN apt-get update && apt-get install -y --no-install-recommends \
        wget make cmake clang git g++ && \
    rm -rf /var/lib/apt/lists/*
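# Switch to the unprivileged user for the remaining steps and for the server
# itself; the useradd above suggests the image was meant to run as this user.
USER app

# Download the quantized model weights (Q5_K_M GGUF, roughly 5 GB); the URL
# is quoted so the shell does not try to expand its '?' query string.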
RUN wget "https://huggingface.co/mradermacher/bellman-mistral-7b-instruct-v0.3-GGUF/resolve/main/bellman-mistral-7b-instruct-v0.3.Q5_K_M.gguf?download=true" -O model.gguf
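# Shallow-clone llama.cpp and configure an out-of-tree Release build straight
# from the checkout (-S selects the source directory).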
RUN git clone --depth 1 https://github.com/ggerganov/llama.cpp
RUN cmake -S llama.cpp -B build -DCMAKE_BUILD_TYPE=Release
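# Compile just the llama-server target, using all available cores.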
RUN cmake --build build --config Release --target llama-server -j "$(nproc)"
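# Hugging Face Spaces routes external traffic to port 7860 by default.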
EXPOSE 7860
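# Launch the llama.cpp HTTP server with a 4096-token context window and a
# 1024-token generation cap per request.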
CMD ["sh", "-c", "./build/bin/llama-server -m /home/app/model.gguf -c 4096 -n 1024 --host 0.0.0.0 --port 7860"] |