Spaces:
Runtime error
Runtime error
manojpatil
committed on
Commit
•
e1a910b
1
Parent(s):
5a82cc1
Update app.py
Browse files
app.py
CHANGED
@@ -44,8 +44,8 @@ vectorstore = Milvus(connection_args=connection_args, collection_name=collection
|
|
44 |
|
45 |
#downloading the model
|
46 |
|
47 |
-
url = "https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.
|
48 |
-
output_file = "llama-2-7b-chat.
|
49 |
|
50 |
response = requests.get(url)
|
51 |
|
@@ -192,7 +192,7 @@ def stream(input_text,prompt,context1,context2) -> Generator:
|
|
192 |
# Initialize the LLM we'll be using
|
193 |
|
194 |
llm = LlamaCpp(
|
195 |
-
model_path="llama-2-7b-chat.
|
196 |
callbacks=[QueueCallback(q)],
|
197 |
verbose=True,
|
198 |
n_ctx=4000,
|
|
|
44 |
|
45 |
#downloading the model
|
46 |
|
47 |
+
url = "https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.ggmlv3.q2_K.bin"
|
48 |
+
output_file = "llama-2-7b-chat.ggmlv3.q2_K.bin" # The filename you want to save the downloaded file as
|
49 |
|
50 |
response = requests.get(url)
|
51 |
|
|
|
192 |
# Initialize the LLM we'll be using
|
193 |
|
194 |
llm = LlamaCpp(
|
195 |
+
model_path="llama-2-7b-chat.ggmlv3.q2_K.bin", # model path
|
196 |
callbacks=[QueueCallback(q)],
|
197 |
verbose=True,
|
198 |
n_ctx=4000,
|