Spaces:
Paused
Paused
File size: 2,174 Bytes
4ac8f37 b7c945f 186058d b7c945f 0c5837d b7c945f 90dab9b b7c945f 90dab9b b7c945f f516ae8 186058d 90dab9b b7c945f 88d3326 49feecf 90d89f1 0703f7d 86fb0a8 44db920 86fb0a8 87fd2e2 31ec84e a369b1f 0703f7d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
# CUDA 11.8 devel base: the full toolkit is needed so aphrodite-engine can
# build its GPU extensions at install time.
FROM nvidia/cuda:11.8.0-devel-ubuntu22.04

# Use apt-get (stable CLI) rather than apt, skip recommended packages, and
# drop the package lists in the SAME layer so they never persist in the image.
# Packages sorted alphabetically for diffability.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    git \
    libopenblas-dev \
    nodejs \
    npm \
    python3-pip \
    wget \
    && rm -rf /var/lib/apt/lists/*
# Create an unprivileged user (UID 1000 matches the default Hugging Face
# Spaces runtime user) and switch to it for everything that follows.
RUN useradd -m -u 1000 user
USER user
# Put user-level pip installs (~/.local/bin) on PATH so tools installed
# with `pip install --user` semantics are runnable.
ENV HOME=/home/user \
PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app
# Copy the build context owned by the runtime user so the app dir is writable.
COPY --chown=user . $HOME/app
RUN python3 -m pip install aphrodite-engine
# Fetch the two GGUF shards, concatenate them into model.gguf, and delete the
# shards in the SAME layer. Splitting download / cat / rm across separate RUNs
# (as before) leaves both multi-GB shards permanently baked into earlier
# layers — the later `rm` cannot shrink the image.
# URLs are quoted because `?` is a shell glob character.
RUN wget -O part_a.gguf "https://huggingface.co/NeverSleep/MiquMaid-v2-2x70B-DPO-GGUF/resolve/main/MiquMaid-v2-2x70B-DPO.Q4_K_M.gguf_part_aa?download=true" \
    && wget -O part_b.gguf "https://huggingface.co/NeverSleep/MiquMaid-v2-2x70B-DPO-GGUF/resolve/main/MiquMaid-v2-2x70B-DPO.Q4_K_M.gguf_part_ab?download=true" \
    && cat part_a.gguf part_b.gguf > model.gguf \
    && rm part_a.gguf part_b.gguf
# Fetch the model's config and tokenizer metadata in a single layer.
# (The original downloaded tokenizer_config.json twice with a misplaced
# comment between the duplicates; the duplicate fetch is removed.)
RUN wget -O config.json "https://huggingface.co/NeverSleep/MiquMaid-v2-2x70B-DPO/resolve/main/config.json?download=true" \
    && wget -O special_tokens_map.json "https://huggingface.co/NeverSleep/MiquMaid-v2-2x70B-DPO/resolve/main/special_tokens_map.json?download=true" \
    && wget -O tokenizer.json "https://huggingface.co/NeverSleep/MiquMaid-v2-2x70B-DPO/resolve/main/tokenizer.json?download=true" \
    && wget -O tokenizer.model "https://huggingface.co/NeverSleep/MiquMaid-v2-2x70B-DPO/resolve/main/tokenizer.model?download=true" \
    && wget -O tokenizer_config.json "https://huggingface.co/NeverSleep/MiquMaid-v2-2x70B-DPO/resolve/main/tokenizer_config.json?download=true"
# Fetch the GGUF -> torch conversion helper from the aphrodite-engine repo.
# NOTE(review): pinned to the `main` branch, not a commit SHA — the script can
# change between builds; pin a commit for reproducibility once verified.
RUN wget -O gguf_to_torch.py "https://raw.githubusercontent.com/PygmalionAI/aphrodite-engine/main/examples/gguf_to_torch.py"
# protobuf capped at 3.20.x — presumably a compatibility requirement of the
# conversion script's tokenizer handling; confirm before bumping.
RUN python3 -m pip install --no-cache-dir protobuf==3.20.*
# Convert the merged GGUF into torch-format weights next to the config files,
# where the server's --model flag points at startup.
RUN python3 gguf_to_torch.py --input $HOME/app/model.gguf --output $HOME/app/
# Document the service port (EXPOSE does not publish; HF Spaces expects 7860).
EXPOSE 7860
# Exec-form CMD so the container's main process handles signals from
# `docker stop` correctly: bash -c is retained only because $ENGINE_ARGS and
# $HOME need shell expansion, and `exec` replaces the shell so the API server
# becomes PID 1. Bind 0.0.0.0 so the port is reachable from outside the
# container, not just localhost.
# NOTE(review): ENGINE_ARGS is not defined in this file — it is expected to be
# supplied at runtime (e.g. Space environment settings) and expands empty if
# unset; confirm against the deployment config.
CMD ["/bin/bash", "-c", "exec python3 -m aphrodite.endpoints.openai.api_server $ENGINE_ARGS --port 7860 --host 0.0.0.0 --model $HOME/app/"]
|