Upload folder using huggingface_hub
README.md CHANGED
@@ -1,12 +1,12 @@
+
 ---
-title:
-emoji:
-colorFrom:
+title: chatbot_retry_undo_like
+emoji: 🔥
+colorFrom: indigo
 colorTo: indigo
 sdk: gradio
-sdk_version:
-app_file:
+sdk_version: 5.0.0
+app_file: run.py
 pinned: false
+hf_oauth: true
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
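The front matter pins the Gradio SDK to 5.0.0 and points the Space at run.py as its entry file; `hf_oauth: true` additionally enables the "Sign in with Hugging Face" OAuth flow for the Space. run.py as committed never reads the login state, but a handler could consume it roughly like this (a minimal sketch, assuming the app runs inside a Space with OAuth enabled; the `greet` function is hypothetical, not part of this commit):

import gradio as gr

def greet(profile: gr.OAuthProfile | None) -> str:
    # Gradio injects the OAuth profile based on the type hint; it is None
    # when nobody is signed in (or when running outside a Space).
    return f"Hello, {profile.username}!" if profile else "Not signed in."

with gr.Blocks() as demo:
    gr.LoginButton()  # renders the "Sign in with Hugging Face" button
    status = gr.Markdown()
    demo.load(greet, inputs=None, outputs=status)

demo.launch()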
requirements.txt ADDED
@@ -0,0 +1 @@
huggingface_hub
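Only huggingface_hub needs to be listed here: the Spaces runtime installs Gradio itself from the sdk / sdk_version fields in the README front matter, so requirements.txt carries just the extra dependency the demo imports.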
run.ipynb ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_retry_undo_like"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio huggingface_hub "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from huggingface_hub import InferenceClient\n", "import gradio as gr\n", "\n", "client = InferenceClient()\n", "\n", "def respond(\n", " prompt: str,\n", " history,\n", "):\n", " if not history:\n", " history = [{\"role\": \"system\", \"content\": \"You are a friendly chatbot\"}]\n", " history.append({\"role\": \"user\", \"content\": prompt})\n", "\n", " yield history\n", "\n", " response = {\"role\": \"assistant\", \"content\": \"\"}\n", " for message in client.chat_completion( # type: ignore\n", " history,\n", " temperature=0.95,\n", " top_p=0.9,\n", " max_tokens=512,\n", " stream=True,\n", " model=\"HuggingFaceH4/zephyr-7b-beta\"\n", " ):\n", " response[\"content\"] += message.choices[0].delta.content or \"\"\n", " yield history + [response]\n", "\n", "\n", "def handle_undo(history, undo_data: gr.UndoData):\n", " return history[:undo_data.index], history[undo_data.index]['content']\n", "\n", "def handle_retry(history, retry_data: gr.RetryData):\n", " new_history = history[:retry_data.index]\n", " previous_prompt = history[retry_data.index]['content']\n", " yield from respond(previous_prompt, new_history)\n", "\n", "\n", "def handle_like(data: gr.LikeData):\n", " if data.liked:\n", " print(\"You upvoted this response: \", data.value)\n", " else:\n", " print(\"You downvoted this response: \", data.value)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\"# Chat with Hugging Face Zephyr 7b \ud83e\udd17\")\n", " chatbot = gr.Chatbot(\n", " label=\"Agent\",\n", " type=\"messages\",\n", " avatar_images=(\n", " None,\n", " \"https://em-content.zobj.net/source/twitter/376/hugging-face_1f917.png\",\n", " ),\n", " )\n", " prompt = gr.Textbox(max_lines=1, label=\"Chat Message\")\n", " prompt.submit(respond, [prompt, chatbot], [chatbot])\n", " prompt.submit(lambda: \"\", None, [prompt])\n", " chatbot.undo(handle_undo, chatbot, [chatbot, prompt])\n", " chatbot.retry(handle_retry, chatbot, [chatbot])\n", " chatbot.like(handle_like, None, None)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
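The notebook is the same demo packaged for interactive use: a markdown title cell, a `pip install -q gradio huggingface_hub` cell, and a code cell holding the full contents of run.py below.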
run.py ADDED
@@ -0,0 +1,64 @@
from huggingface_hub import InferenceClient
import gradio as gr

client = InferenceClient()

def respond(
    prompt: str,
    history,
):
    if not history:
        history = [{"role": "system", "content": "You are a friendly chatbot"}]
    history.append({"role": "user", "content": prompt})

    yield history

    response = {"role": "assistant", "content": ""}
    for message in client.chat_completion( # type: ignore
        history,
        temperature=0.95,
        top_p=0.9,
        max_tokens=512,
        stream=True,
        model="HuggingFaceH4/zephyr-7b-beta"
    ):
        response["content"] += message.choices[0].delta.content or ""
        yield history + [response]
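# respond() yields the complete message list each time: the first yield echoes
# the user turn immediately, and each streamed delta repaints the in-progress
# assistant turn until chat_completion finishes.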


def handle_undo(history, undo_data: gr.UndoData):
    return history[:undo_data.index], history[undo_data.index]['content']

def handle_retry(history, retry_data: gr.RetryData):
    new_history = history[:retry_data.index]
    previous_prompt = history[retry_data.index]['content']
    yield from respond(previous_prompt, new_history)
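# Both event payloads carry an .index into the message history: undo truncates
# the conversation there and returns the removed prompt (restored into the
# textbox), while retry truncates and replays the prompt through respond().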


def handle_like(data: gr.LikeData):
    if data.liked:
        print("You upvoted this response: ", data.value)
    else:
        print("You downvoted this response: ", data.value)
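# LikeData exposes .liked (True for an upvote) and .value (the message text);
# this demo just logs the feedback to the Space's container logs.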


with gr.Blocks() as demo:
    gr.Markdown("# Chat with Hugging Face Zephyr 7b 🤗")
    chatbot = gr.Chatbot(
        label="Agent",
        type="messages",
        avatar_images=(
            None,
            "https://em-content.zobj.net/source/twitter/376/hugging-face_1f917.png",
        ),
    )
    prompt = gr.Textbox(max_lines=1, label="Chat Message")
    prompt.submit(respond, [prompt, chatbot], [chatbot])
    prompt.submit(lambda: "", None, [prompt])
    chatbot.undo(handle_undo, chatbot, [chatbot, prompt])
    chatbot.retry(handle_retry, chatbot, [chatbot])
    chatbot.like(handle_like, None, None)
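# Two .submit listeners fire on Enter: the first streams respond() into the
# Chatbot, the second clears the Textbox. handle_undo outputs to both the
# Chatbot and the Textbox so the withdrawn prompt comes back for editing.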


if __name__ == "__main__":
    demo.launch()
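The streaming loop in respond() is the only part that talks to the Inference API, and it can be exercised outside Gradio. A minimal sketch, assuming a Hugging Face token is available in the environment (the message list and max_tokens value here are illustrative, not from the demo):

from huggingface_hub import InferenceClient

client = InferenceClient()
messages = [
    {"role": "system", "content": "You are a friendly chatbot"},
    {"role": "user", "content": "Hello!"},
]
for chunk in client.chat_completion(
    messages,
    model="HuggingFaceH4/zephyr-7b-beta",
    max_tokens=64,
    stream=True,
):
    # Each chunk carries an incremental delta; content can be None on the
    # final chunk, hence the `or ""` guard (same as in respond()).
    print(chunk.choices[0].delta.content or "", end="", flush=True)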