# syntax=docker/dockerfile:1
# CUDA 11.8 devel base (pinned tag): compiler + CUDA headers are needed to
# build the GPU wheels pulled in by tabbyAPI's requirements.
FROM nvidia/cuda:11.8.0-devel-ubuntu22.04

# Install all OS packages in a single layer: use apt-get (apt is not meant for
# scripts), skip recommended packages, and remove the apt lists in the same
# layer so they never persist in the image. The original had a second
# `apt update && apt install wget` layer that re-installed wget — dropped.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        git \
        libopenblas-dev \
        python3-pip \
        wget \
    && rm -rf /var/lib/apt/lists/*

# Set up a new unprivileged user "user" (UID 1000) so the app does not run as root
RUN useradd -m -u 1000 user
# Switch to the "user" user
USER user
# Set home to the user's home directory
ENV HOME=/home/user \
	PATH=/home/user/.local/bin:$PATH

# Set the working directory to the user's home directory
WORKDIR $HOME/app

# Copy the current directory contents into the container at $HOME/app setting the owner to the user
COPY --chown=user . $HOME/app

# Build-time knobs.
# NOTE(review): none of these ARGs is referenced below — the model repo and
# name are hardcoded further down. Either wire them into the download/config
# steps or remove them; kept here so existing `--build-arg` invocations still work.
ARG MODEL_PART_A
ARG MODEL_PART_B
ARG MODEL_NAME
ARG ADDITIONAL

# Fetch the tabbyAPI server source.
# NOTE(review): unpinned clone of the default branch — pin a tag/commit for
# reproducible builds.
RUN git clone https://github.com/theroyallab/tabbyAPI
WORKDIR $HOME/app/tabbyAPI

# Python dependencies; --no-cache-dir keeps pip's download cache out of the layer
RUN pip install --no-cache-dir -q -r requirements.txt
RUN pip install --no-cache-dir -q huggingface-hub
# Generate tabbyAPI's config.yml in a single layer. One printf with one
# argument per output line replaces the original 19-echo chain; the file
# content produced is byte-identical.
# NOTE(review): host 127.0.0.1 makes the API reachable only from inside the
# container — confirm this is intended (0.0.0.0 is the usual bind for a
# containerized server).
RUN printf '%s\n' \
        'network:' \
        '  host: 127.0.0.1' \
        '  port: 5000' \
        '  disable_auth: False' \
        '' \
        'logging:' \
        '  prompt: False' \
        '  generation_params: False' \
        '' \
        'sampling:' \
        '  override_preset: null' \
        '' \
        'developer:' \
        '  unsafe_launch: False' \
        '' \
        'model:' \
        '  model_dir: models' \
        '  model_name: goliath-120b-gptq' \
        '  use_dummy_models: False' \
        > config.yml
# Model directory. The explicit mkdir is kept (rather than relying on WORKDIR
# auto-creation) so the directory is created by the active "user" account.
WORKDIR $HOME/app/tabbyAPI/models
RUN mkdir -p goliath-120b-gptq
WORKDIR $HOME/app/tabbyAPI/models/goliath-120b-gptq

# hf-transfer provides the accelerated downloader enabled by the env var below.
# (huggingface-hub itself is already installed earlier — duplicate dropped.)
RUN pip install --no-cache-dir hf-transfer
ENV HF_HUB_ENABLE_HF_TRANSFER=1

# Download the model into the current directory. With symlinks disabled the
# files are *copied* out of the cache, so the cache must be deleted in the
# same layer — otherwise the image stores the weights twice.
RUN huggingface-cli download TheBloke/goliath-120b-gptq --local-dir ./ --local-dir-use-symlinks False --cache-dir ~/cache \
    && rm -rf ~/cache
# Launch from the server root
WORKDIR $HOME/app/tabbyAPI

# Document the API port configured in config.yml (EXPOSE does not publish;
# run with e.g. `docker run -p 5000:5000`)
EXPOSE 5000

# Exec-form CMD: python3 runs as PID 1 and receives SIGTERM directly
CMD ["python3", "main.py"]