yusufs committed on
Commit
d90e4d6
·
1 Parent(s): 5f3bf21

feat(change-model): change to sail/Sailor-4B-Chat 89a866a7041e6ec023dd462adeca8e28dd53c83e

Browse files
Files changed (1) hide show
  1. run.sh +14 -2
run.sh CHANGED
@@ -3,9 +3,21 @@
3
 
4
  printf "Running vLLM OpenAI compatible API Server at port %s\n" "7860"
5
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  python -u /app/openai_compatible_api_server.py \
7
- --model meta-llama/Llama-3.2-3B-Instruct \
8
- --revision 0cb88a4f764b7a12671c53f0838cd831a0843b95 \
9
  --host 0.0.0.0 \
10
  --port 7860 \
11
  --max-num-batched-tokens 32768 \
 
3
 
4
  printf "Running vLLM OpenAI compatible API Server at port %s\n" "7860"
5
 
6
+ #python -u /app/openai_compatible_api_server.py \
7
+ # --model meta-llama/Llama-3.2-3B-Instruct \
8
+ # --revision 0cb88a4f764b7a12671c53f0838cd831a0843b95 \
9
+ # --host 0.0.0.0 \
10
+ # --port 7860 \
11
+ # --max-num-batched-tokens 32768 \
12
+ # --max-model-len 32768 \
13
+ # --dtype half \
14
+ # --enforce-eager \
15
+ # --gpu-memory-utilization 0.85
16
+
17
+
18
  python -u /app/openai_compatible_api_server.py \
19
+ --model sail/Sailor-4B-Chat \
20
+ --revision 89a866a7041e6ec023dd462adeca8e28dd53c83e \
21
  --host 0.0.0.0 \
22
  --port 7860 \
23
  --max-num-batched-tokens 32768 \