|
|
|
# syntax=docker/dockerfile:1
# Slim Debian-based Python base keeps the image small; the tag is pinned to a
# minor version. For fully reproducible builds, pin by digest as well
# (e.g. python:3.10-slim@sha256:...).
FROM python:3.10-slim
|
|
|
|
|
# Build toolchain + OpenBLAS headers needed to compile llama-cpp-python from
# source; curl is used below to fetch the model weights.
# update + install + cache cleanup live in ONE layer: a separate `apt-get update`
# layer goes stale (DL3009), and removing the apt lists later would not shrink
# the image. --no-install-recommends avoids pulling unneeded packages (DL3015).
# Packages are sorted alphabetically for diffability.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        libopenblas-dev \
        ninja-build \
        pkg-config \
    && rm -rf /var/lib/apt/lists/*
|
|
|
# Build llama-cpp-python against OpenBLAS (CMAKE_ARGS) and force a source
# build (FORCE_CMAKE=1) so the BLAS backend is actually compiled in.
# --no-cache-dir keeps pip's download cache out of the layer (DL3042).
# The extras spec is quoted: unquoted [...] is a glob pattern in some shells.
# NOTE(review): llama-cpp-python is unpinned, so rebuilds may pick up a new
# version — consider pinning (e.g. llama-cpp-python[server]==x.y.z).
RUN pip install --no-cache-dir -U pip setuptools wheel && \
    CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" FORCE_CMAKE=1 \
    pip install --no-cache-dir --verbose "llama-cpp-python[server]"
|
|
|
|
|
# Download the quantized GGML model at build time so the container is
# self-contained (note: this bakes ~9 GB of weights into the image).
# -f makes curl fail the build on HTTP 4xx/5xx instead of silently saving an
# HTML error page as the "model"; -L follows Hugging Face's redirects.
# mkdir -p is idempotent if the directory already exists.
# NOTE(review): destination is relative to the current WORKDIR (/ here, since
# no WORKDIR is set) — start_server.sh presumably expects ./model/ at runtime.
RUN mkdir -p model && \
    curl -fL https://huggingface.co/TheBloke/orca_mini_v3_13B-GGML/resolve/main/orca_mini_v3_13b.ggmlv3.q5_K_S.bin -o model/ggmlv3-model.bin
|
|
|
# Application files. COPY --chmod sets the execute bit at copy time (BuildKit),
# replacing the former follow-up `RUN chmod +x`, which duplicated the file
# into an extra layer.
# NOTE(review): files land in the image root because no WORKDIR is set;
# consider WORKDIR /app after confirming start_server.sh's relative paths.
COPY --chmod=755 ./start_server.sh ./
COPY ./main.py ./
COPY ./README.md ./
|
|
|
|
|
# Runtime configuration consumed by the llama-cpp-python server (via
# start_server.sh). Related vars are grouped in one instruction; key=value
# form is the non-deprecated syntax. 0.0.0.0 binds on all interfaces so the
# server is reachable through the published port.
ENV HOST=0.0.0.0 \
    PORT=7860
|
|
|
|
|
# Documentation only (does not publish the port): resolves to 7860 via the
# ENV above; run with `docker run -p <host>:7860 ...` to actually publish it.
EXPOSE ${PORT}
|
|
|
|
|
# Exec (JSON-array) form, launching the startup wrapper through /bin/sh.
# NOTE(review): the shell is PID 1 here — ensure start_server.sh ends with
# `exec ...` so the server receives SIGTERM from `docker stop`; confirm in
# the script (not visible in this file).
CMD ["/bin/sh", "./start_server.sh"]