Spaces:
Sleeping
Sleeping
Elijahbodden
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -10,7 +10,14 @@ os.system('pip install llama-cpp-python transformers torch')
|
|
10 |
from llama_cpp import Llama
|
11 |
from transformers import AutoTokenizer
|
12 |
import torch
|
13 |
-
from
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
14 |
model_id = "Elijahbodden/eliGPTv1.1"
|
15 |
|
16 |
# MODEL
|
@@ -112,6 +119,11 @@ def respond(
|
|
112 |
|
113 |
response += token
|
114 |
yield response
|
|
|
|
|
|
|
|
|
|
|
115 |
|
116 |
|
117 |
demo = gr.ChatInterface(
|
|
|
10 |
from llama_cpp import Llama
|
11 |
from transformers import AutoTokenizer
|
12 |
import torch
|
13 |
+
from huggingface_hub import upload_file
|
14 |
+
import json
|
15 |
+
from uuid import uuid4
|
16 |
+
|
17 |
+
# For logging
|
18 |
+
def upload_json_to_hub(json, file_id):
|
19 |
+
upload_file(path_or_fileobj=json.dumps(json).encode('utf-8'), path_in_repo=file_id, repo_id="Elijahbodden/EliGPT-convologs", token="os.getenv['HF_API_TOKEN']", repo_type="dataset")
|
20 |
+
|
21 |
model_id = "Elijahbodden/eliGPTv1.1"
|
22 |
|
23 |
# MODEL
|
|
|
119 |
|
120 |
response += token
|
121 |
yield response
|
122 |
+
|
123 |
+
messages.append({"role": "assistant", "content": response})
|
124 |
+
|
125 |
+
# Yes we make a new file every session because fuck my life
|
126 |
+
upload_json_to_hub(messages, uuid4())
|
127 |
|
128 |
|
129 |
demo = gr.ChatInterface(
|