FROM python:3.11-slim

# Build tools plus wget (not included in the slim base image) for fetching the model.
RUN apt-get update && apt-get install -y git cmake build-essential ninja-build wget

WORKDIR /app

# Build the llama.cpp server target and copy the binary to /app/server.
RUN git clone https://github.com/ggerganov/llama.cpp --depth 1 repo && \
    cd repo && \
    cmake -B build && \
    cmake --build build --config Release --target server && \
    cp build/bin/server /app/server

# Download the quantized Qwen-1.8B model (-O names the output file, -c resumes partial downloads).
RUN wget -c -O model.gguf "https://huggingface.co/zhangtao103239/Qwen-1.8B-GGUF/resolve/main/qwen-1.8b-q5_k_m.gguf"

EXPOSE 7860

CMD ["/app/server", "-m", "model.gguf", "--host", "0.0.0.0", "--port", "7860"]
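
# A quick smoke test once the image is built (a sketch, not part of the build;
# the image tag "qwen-llamacpp" and the prompt are illustrative placeholders).
# The llama.cpp server serves an HTTP /completion endpoint on the published port:
#
#   docker build -t qwen-llamacpp .
#   docker run --rm -p 7860:7860 qwen-llamacpp
#   curl http://localhost:7860/completion \
#     -H "Content-Type: application/json" \
#     -d '{"prompt": "Hello, how are you?", "n_predict": 64}'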