lonestar108 commited on
Commit
be10b4a
1 Parent(s): 1ecec28
Files changed (1) hide show
  1. requirements.txt +5 -18
requirements.txt CHANGED
@@ -1,7 +1,7 @@
1
  aiofiles==23.1.0
2
  fastapi==0.95.2
3
- gradio_client==0.2.5
4
- gradio==3.33.1
5
 
6
  accelerate==0.22.*
7
  colorama
@@ -11,11 +11,11 @@ markdown
11
  numpy==1.24
12
  optimum==1.12.0
13
  pandas
14
- peft==0.5.*
15
  Pillow>=9.5.0
16
  pyyaml
17
  requests
18
- safetensors==0.3.2
19
  transformers==4.32.*
20
  scipy
21
  sentencepiece
@@ -26,24 +26,11 @@ wandb
26
  # bitsandbytes
27
  bitsandbytes==0.41.1
28
 
29
- # AutoGPTQ
30
- https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.4.2/auto_gptq-0.4.2+cu117-cp310-cp310-linux_x86_64.whl
31
-
32
  # ExLlama
33
- https://github.com/jllllll/exllama/releases/download/0.0.14/exllama-0.0.14+cu117-cp310-cp310-linux_x86_64.whl
34
 
35
  # llama-cpp-python without GPU support
36
  llama-cpp-python==0.1.83
37
 
38
- # llama-cpp-python with CUDA support
39
- https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.83+cu117-cp310-cp310-linux_x86_64.whl
40
-
41
- # llama-cpp-python with GGML support
42
- https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python_ggml-0.1.78+cpuavx2-cp310-cp310-linux_x86_64.whl
43
- https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_ggml_cuda-0.1.78+cu117-cp310-cp310-linux_x86_64.whl
44
-
45
- # GPTQ-for-LLaMa
46
- https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_llama-0.1.0+cu117-cp310-cp310-linux_x86_64.whl
47
-
48
  # ctransformers
49
  https://github.com/jllllll/ctransformers-cuBLAS-wheels/releases/download/AVX2/ctransformers-0.2.25+cu117-py3-none-any.whl
 
1
  aiofiles==23.1.0
2
  fastapi==0.95.2
3
+ gradio_client>=0.2.5
4
+ gradio>=3.33.1
5
 
6
  accelerate==0.22.*
7
  colorama
 
11
  numpy==1.24
12
  optimum==1.12.0
13
  pandas
14
+ peft>=0.5.0
15
  Pillow>=9.5.0
16
  pyyaml
17
  requests
18
+ safetensors
19
  transformers==4.32.*
20
  scipy
21
  sentencepiece
 
26
  # bitsandbytes
27
  bitsandbytes==0.41.1
28
 
 
 
 
29
  # ExLlama
30
+ exllama
31
 
32
  # llama-cpp-python without GPU support
33
  llama-cpp-python==0.1.83
34
 
 
 
 
 
 
 
 
 
 
 
35
  # ctransformers
36
  https://github.com/jllllll/ctransformers-cuBLAS-wheels/releases/download/AVX2/ctransformers-0.2.25+cu117-py3-none-any.whl