Islam YAHIAOUI committed
Commit · 31e6eb8
1 Parent(s): 3e33cc1

Update UI
- Helpers.py +2 -1
- __pycache__/Helpers.cpython-312.pyc +0 -0
- __pycache__/rag.cpython-312.pyc +0 -0
- app.py +73 -24
- rag.py +1 -1
Helpers.py
CHANGED
@@ -20,9 +20,10 @@ def generate_prompt(context, question, history=None):
 {prompt_context}
 
 [INST] {question} [/INST]
+
+Response:
 """
 
-    # Response:
     return prompt
 
 # ==============================================================================================================================================
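In effect, the commit moves the response cue into the template itself: the old `# Response:` was a Python comment placed after the closing `"""`, so it was never part of the string sent to the model, while the new `Response:` line is inside the prompt. A minimal sketch of how the updated generate_prompt assembles its output, assuming it builds an f-string; the joining of `context` into `prompt_context` and anything outside the visible hunk are illustrative, not from the commit:

def generate_prompt(context, question, history=None):
    # Hypothetical assembly of the retrieved documents; the real helper may differ,
    # and the visible hunk does not show how history is used.
    prompt_context = "\n\n".join(str(doc) for doc in context)
    prompt = f"""
{prompt_context}

[INST] {question} [/INST]

Response:
"""
    return prompt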
__pycache__/Helpers.cpython-312.pyc
CHANGED
Binary files a/__pycache__/Helpers.cpython-312.pyc and b/__pycache__/Helpers.cpython-312.pyc differ

__pycache__/rag.cpython-312.pyc
CHANGED
Binary files a/__pycache__/rag.cpython-312.pyc and b/__pycache__/rag.cpython-312.pyc differ
app.py
CHANGED
@@ -1,15 +1,17 @@
+import json
 import gradio as gr
 from huggingface_hub import InferenceClient
 import os
+
+import requests
 from rag import run_rag
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
 
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
 
-def respond(
+def chat(
     message,
     history: list[tuple[str, str]],
     system_message,
@@ -24,11 +26,12 @@ def respond(
             messages.append({"role": "user", "content": val[0]})
         if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
+    message = run_rag(message, history)
 
-    messages.append({"role": "user", "content":
+    messages.append({"role": "user", "content": message})
 
     response = ""
-
+
     for message in client.chat_completion(
         messages,
         max_tokens=max_tokens,
@@ -44,25 +47,71 @@ def respond(
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+chatbot = gr.Chatbot(
+    label="Retrieval Augmented Generation News & Finance",
+    # avatar_images=[None, BOT_AVATAR],
+    show_copy_button=True,
+    likeable=True,
+    layout="bubble")
+theme = gr.themes.Base(
+    font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
+)
+EXAMPLES = [
+    ["Tell me about the latest news in the world ?"],
+    ["Tell me about the increase in the price of Bitcoin ?"],
+    ["Tell me about the actual situation in Ukraine ?"],
+    ["Tell me about current situation in palestine ?"],
+]
+max_new_tokens = gr.Slider(
+    minimum=1,
+    maximum=2048,
+    value=512,
+    step=1,
+    interactive=True,
+    label="Max new tokens",
+)
+temperature = gr.Slider(
+    minimum=0.1,
+    maximum=0.9,
+    value=0.6,
+    step=0.1,
+    visible=True,
+    interactive=True,
+    label="Temperature",
+    info="Higher values will produce more diverse outputs.",
+)
+top_p = gr.Slider(
+    minimum=0.1,
+    maximum=1,
+    value=0.9,
+    step=0.05,
+    visible=True,
+    interactive=True,
+    label="Top-p (nucleus sampling)",
+    info="Higher values is equivalent to sampling more low-probability tokens.",
 )
 
-
-
-
+with gr.Blocks(
+    fill_height=True,
+    css=""".gradio-container .avatar-container {height: 40px width: 40px !important;} #duplicate-button {margin: auto; color: white; background: #f1a139; border-radius: 100vh; margin-top: 2px; margin-bottom: 2px;}""",
+) as main:
+    gr.ChatInterface(
+        chat,
+        chatbot=chatbot,
+        title="Retrieval Augmented Generation (RAG) Chatbot",
+        description="A chatbot that uses a RAG model to generate responses based on the input query.",
+        examples=EXAMPLES,
+        theme=theme,
+        fill_height=True,
+        multimodal=True,
+        additional_inputs=[
+            max_new_tokens,
+            temperature,
+            top_p,
+        ],
+    )
+with gr.Blocks(theme=theme, css="footer {visibility: hidden}textbox{resize:none}", title="RAG") as demo:
+    gr.TabbedInterface([main], tab_names=["Chatbot"])
+demo.launch()
+
+
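For orientation, the hunks above edit the stock zephyr-7b-beta chat template that Gradio Spaces ship with: respond is renamed to chat, the user message is routed through run_rag before being appended, and the plain ChatInterface launch is replaced by the themed Blocks layout. A sketch of the full streaming handler around the visible lines; everything between the hunks (the remaining signature parameters and the streaming loop body) is assumed from the stock template rather than taken from this commit:

from huggingface_hub import InferenceClient
from rag import run_rag

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

def chat(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    # New in this commit: the raw query is replaced by the RAG prompt built in rag.py.
    message = run_rag(message, history)
    messages.append({"role": "user", "content": message})

    response = ""
    # Assumed streaming call, as in the stock template the diff is based on.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += chunk.choices[0].delta.content or ""
        yield response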
rag.py
CHANGED
@@ -26,4 +26,4 @@ def run_rag(query, history=None):
     documents = get_docs_by_indices(docs, indices)
     prompt = generate_prompt(documents, query, history)
 
-    return
+    return prompt
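This one-line fix matters because of the message = run_rag(message, history) call added in app.py: previously the bare return discarded the prompt that generate_prompt had just built, so the caller received None instead of the assembled prompt. A minimal sketch of how the caller consumes the return value now, using a hypothetical build_messages helper; the system message text is illustrative:

from rag import run_rag  # the Space's rag module

def build_messages(user_query, history, system_message="You are a helpful assistant."):
    # Mirrors the flow in app.py's chat(): the assembled RAG prompt replaces the raw user turn.
    messages = [{"role": "system", "content": system_message}]
    prompt = run_rag(user_query, history)  # now returns the prompt instead of None
    messages.append({"role": "user", "content": prompt})
    return messages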