# vllm-inference / Dockerfile
# Author: yusufs — feat(Dockerfile): install gcc (commit e8cd3e0, 1.44 kB)
# Slim Debian bookworm base with a pinned Python patch version for reproducibility.
FROM python:3.12.7-slim-bookworm
# Build toolchain and fetch utilities needed to compile wheels from requirements.txt.
# NOTE: build-essential already depends on gcc; gcc is kept explicit to preserve
# the original intent of the commit that added it.
# --no-install-recommends keeps the layer minimal, and removing the apt lists in
# the SAME RUN prevents the package index from being baked into the image layer.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        gcc \
        git \
        unzip \
        wget \
    && rm -rf /var/lib/apt/lists/*
# Create an unprivileged user (UID 1000 is the Hugging Face Spaces convention)
# and run every instruction below as that user.
RUN useradd -m -u 1000 user
USER user
# As a non-root user, pip installs console scripts into ~/.local/bin — put it on PATH.
ENV PATH="/home/user/.local/bin:$PATH"
# Absolute working directory; created automatically if missing.
WORKDIR /app
# Copy only the dependency manifest first so the pip layer below stays cached
# until requirements.txt itself changes (source edits won't trigger a reinstall).
COPY --chown=user ./requirements.txt requirements.txt
# NOTE(review): the cu113 extra index is likely stale for Python 3.12 — PyTorch
# CUDA 11.3 wheels were only built for much older Python versions, so this index
# probably contributes nothing (or fails to resolve) here; confirm the intended
# CUDA version for these requirements.
RUN pip install --no-cache-dir -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu113
# Application source goes in last, after the dependency layer, to preserve caching.
COPY --chown=user . /app
# Download the model at build time,
# so that on restart we only wait for the docker pull, not for the download from HF.
# In Docker Spaces, the secrets management is different for security reasons.
# Once you create a secret in the Settings tab,
# you can expose the secret by adding the following line in your Dockerfile:
#
# For example, if SECRET_EXAMPLE is the name of the secret you created in the Settings tab,
# you can read it at build time by mounting it to a file, then reading it with $(cat /run/secrets/SECRET_EXAMPLE).
# https://huggingface.co/docs/hub/en/spaces-sdks-docker#buildtime
#
# AFTER TRIAL AND ERROR WE GOT 16GB (16431849854 bytes) OF LAYERS :(
#
# RUN --mount=type=secret,id=HF_TOKEN,mode=0444,required=true HF_TOKEN=$(cat /run/secrets/HF_TOKEN) python /app/download_model.py
# Document the listening port (7860 is the Hugging Face Spaces default).
# EXPOSE is documentation only; it does not publish the port.
EXPOSE 7860
#CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
# Make all launcher scripts executable in a single layer: a separate RUN chmod
# per file re-stores a full copy of each file in its own new layer.
RUN chmod +x /app/runner.sh /app/run-llama.sh /app/run-sailor.sh
# Exec-form CMD so runner.sh runs as PID 1 and receives SIGTERM from `docker stop`.
CMD ["/app/runner.sh"]