File size: 737 Bytes
e37abe2
cc2997e
2800109
f3be2ab
cc2997e
 
f3be2ab
 
03cbecd
 
 
83ccd10
f3be2ab
 
cc2997e
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
import os
import subprocess
import sys

# Build and install llama-cpp-python with CUDA (cuBLAS) support, then start
# the Hugging Face Space.
#
# CMAKE_ARGS / FORCE_CMAKE must be passed as *environment variables* so that
# pip's build backend sees them: a previous revision appended them to the
# command list, which has no effect, so the CUDA build was silently skipped.
build_env = os.environ.copy()
build_env["CMAKE_ARGS"] = "-DLLAMA_CUBLAS=on"
build_env["FORCE_CMAKE"] = "1"

# Use an argument list with shell=False (safe, portable) and sys.executable
# so the package is installed into the interpreter actually running this
# script. check=True aborts before launching the server if the build fails.
install_command = [
    sys.executable, "-m", "pip", "install",
    "--upgrade", "--force-reinstall", "--no-cache-dir",
    "llama-cpp-python",
]
subprocess.run(install_command, env=build_env, check=True)

# Start the Hugging Face Space; this call blocks until the server exits.
uvicorn_command = [
    sys.executable, "-m", "uvicorn", "app:app",
    "--host", "0.0.0.0", "--port", "7860",
]
subprocess.run(uvicorn_command)