Update app.py
app.py
CHANGED
@@ -2,8 +2,10 @@ from huggingface_hub import InferenceClient
 import gradio as gr
 import os
 
+# Client for inference
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
+# Secret prompt from environment variables
 secret_prompt = os.getenv("SECRET_PROMPT")
 
 def format_prompt(new_message, history):
@@ -14,17 +16,11 @@ def format_prompt(new_message, history):
     prompt += f"[INST] {new_message} [/INST]"
     return prompt
 
-def generate(prompt, history,
-             temperature=0.25,
-             max_new_tokens=512,
-             top_p=0.95,
-             repetition_penalty=1.0):
-
+def generate(prompt, history, temperature=0.25, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
+    # Parameter configuration
     temperature = float(temperature)
-
     if temperature < 1e-2:
         temperature = 1e-2
-
     top_p = float(top_p)
 
     generate_kwargs = dict(
@@ -37,12 +33,7 @@ def generate(prompt, history,
     )
 
     formatted_prompt = format_prompt(prompt, history)
-
-    stream = client.text_generation(formatted_prompt,
-                                    **generate_kwargs,
-                                    stream=True,
-                                    details=True,
-                                    return_full_text=False)
+    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
 
     for response in stream:
@@ -50,18 +41,11 @@ def generate(prompt, history,
         yield output
     return output
 
-# Chatbot
-ailexchatbot = gr.Chatbot(
-    bubble_full_width=False,
-    show_label=False,
-    show_copy_button=True,
-    likeable=True,)
+# Chatbot without avatars and with a transparent design
+samir_chatbot = gr.Chatbot(bubble_full_width=True, show_label=False, show_copy_button=False, likeable=False)
 
-# Gradio-Demo
+# Minimalist theme and Gradio demo configuration
 theme = 'syddharth/gray-minimal'
-demo = gr.ChatInterface(fn=generate,
-                        chatbot=ailexchatbot,
-                        title="Ailexs Mixtral 8x7b Chat",
-                        theme=theme)
+demo = gr.ChatInterface(fn=generate, chatbot=samir_chatbot, title="Ailexs Mixtral 8x7b Chat", theme=theme)
 
-demo.queue().launch(show_api=False)
+demo.queue().launch(show_api=False)
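The hunks only show the tail of format_prompt, so how secret_prompt and the chat history enter the prompt is not visible in this diff. A minimal sketch of how such a builder is commonly written for Mixtral instruct models, assuming secret_prompt is prepended as a hidden first instruction and history arrives as (user, bot) pairs from gr.ChatInterface:

def format_prompt(new_message, history):
    # Hypothetical reconstruction; only the last two lines appear in the diff.
    prompt = "<s>"
    # Assumption: the hidden prompt from SECRET_PROMPT seeds the conversation.
    if secret_prompt:
        prompt += f"[INST] {secret_prompt} [/INST]"
    # Assumption: history is the list of (user, bot) tuples that
    # gr.ChatInterface passes to its fn.
    for user_turn, bot_turn in history:
        prompt += f"[INST] {user_turn} [/INST] {bot_turn}</s> "
    prompt += f"[INST] {new_message} [/INST]"  # shown verbatim in the hunk
    return prompt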
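Likewise, the contents of generate_kwargs and the body of the streaming loop fall between hunks. A sketch under the assumption that the dict simply mirrors the function parameters (do_sample is an added guess) and that the loop accumulates token text, which is how InferenceClient.text_generation behaves with stream=True and details=True:

    # Hypothetical contents of the elided generate_kwargs block; the keys
    # mirror the parameters of generate, do_sample is an assumption.
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
    )

    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs,
                                    stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        # With stream=True and details=True the client yields one
        # streamed output object per generated token.
        output += response.token.text
        yield output

Each partial output is yielded so gr.ChatInterface can render the reply incrementally as tokens arrive.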