Spaces:
Sleeping
Sleeping
seanpedrickcase
committed on
Commit
·
3db2499
1
Parent(s):
63067b7
Corrected references to extra-index-url in requirements/Dockerfile
Browse files- Dockerfile +1 -1
- app.py +0 -2
- requirements.txt +3 -1
- requirements_cpu.txt +1 -1
Dockerfile
CHANGED
@@ -18,7 +18,7 @@ COPY requirements_aws.txt .
|
|
18 |
|
19 |
RUN pip uninstall -y typing_extensions \
|
20 |
&& pip install --no-cache-dir --target=/install typing_extensions==4.12.2 \
|
21 |
-
&& pip install torch==2.5.1+cpu --
|
22 |
&& pip install --no-cache-dir --target=/install -r requirements_aws.txt
|
23 |
|
24 |
RUN rm requirements_aws.txt
|
|
|
18 |
|
19 |
RUN pip uninstall -y typing_extensions \
|
20 |
&& pip install --no-cache-dir --target=/install typing_extensions==4.12.2 \
|
21 |
+
&& pip install --no-cache-dir --target=/install torch==2.5.1+cpu --extra-index-url https://download.pytorch.org/whl/cpu \
|
22 |
&& pip install --no-cache-dir --target=/install -r requirements_aws.txt
|
23 |
|
24 |
RUN rm requirements_aws.txt
|
app.py
CHANGED
@@ -37,8 +37,6 @@ print("Is a CUDA device available on this computer?", backends.cudnn.enabled)
|
|
37 |
if cuda.is_available():
|
38 |
torch_device = "cuda"
|
39 |
os.system("nvidia-smi")
|
40 |
-
elif "spaces" in host_name:
|
41 |
-
torch_device = "cuda"
|
42 |
else:
|
43 |
torch_device = "cpu"
|
44 |
|
|
|
37 |
if cuda.is_available():
|
38 |
torch_device = "cuda"
|
39 |
os.system("nvidia-smi")
|
|
|
|
|
40 |
else:
|
41 |
torch_device = "cpu"
|
42 |
|
requirements.txt
CHANGED
@@ -12,7 +12,9 @@ html5lib==1.1
|
|
12 |
beautifulsoup4==4.12.3
|
13 |
rapidfuzz==3.10.1
|
14 |
torch==2.4.1 --extra-index-url https://download.pytorch.org/whl/cu121
|
15 |
-
llama-cpp-python==0.2.90 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu121
|
|
|
|
|
16 |
transformers==4.47.0
|
17 |
numpy==1.26.4
|
18 |
typing_extensions==4.12.2
|
|
|
12 |
beautifulsoup4==4.12.3
|
13 |
rapidfuzz==3.10.1
|
14 |
torch==2.4.1 --extra-index-url https://download.pytorch.org/whl/cu121
|
15 |
+
#llama-cpp-python==0.2.90 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu121
|
16 |
+
# Specify exact llama_cpp wheel for huggingface compatibility
|
17 |
+
https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.90-cu121/llama_cpp_python-0.2.90-cp310-cp310-linux_x86_64.whl
|
18 |
transformers==4.47.0
|
19 |
numpy==1.26.4
|
20 |
typing_extensions==4.12.2
|
requirements_cpu.txt
CHANGED
@@ -10,7 +10,7 @@ google-generativeai==0.8.3
|
|
10 |
html5lib==1.1
|
11 |
beautifulsoup4==4.12.3
|
12 |
rapidfuzz==3.10.1
|
13 |
-
torch==2.5.1 --index-url https://download.pytorch.org/whl/cpu
|
14 |
llama-cpp-python==0.2.90 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu
|
15 |
transformers==4.47.0
|
16 |
numpy==1.26.4
|
|
|
10 |
html5lib==1.1
|
11 |
beautifulsoup4==4.12.3
|
12 |
rapidfuzz==3.10.1
|
13 |
+
torch==2.5.1 --extra-index-url https://download.pytorch.org/whl/cpu
|
14 |
llama-cpp-python==0.2.90 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu
|
15 |
transformers==4.47.0
|
16 |
numpy==1.26.4
|