Spaces:
Runtime error
Improved styles v2
app.py
CHANGED
@@ -3,7 +3,6 @@ from threading import Thread
 from typing import Iterator
 
 import gradio as gr
-#import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
@@ -13,8 +12,6 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-# Download model from Huggingface Hub
-# Change this to meta-llama or the correct org name from Huggingface Hub
 model_id = "ussipan/SipanGPT-0.2-Llama-3.2-1B-GGUF"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
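Note: the hunk above cuts off mid-statement, inside the from_pretrained(...) call, and the rest of the loading block is not part of this diff. A minimal sketch of how such a block typically continues, reusing the model_id and device names defined above (the dtype and eval-mode details are common-practice assumptions, not this commit's code):

# Hypothetical completion of the truncated call above; the keyword
# arguments shown are illustrative assumptions, not this repo's code.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
).to(device)
model.eval()  # inference only; no gradients needed in a chat Space

One caveat worth flagging: the repo id ends in -GGUF, while a plain from_pretrained call expects standard PyTorch/safetensors weights; recent transformers releases load GGUF checkpoints only when given an explicit gguf_file= argument. If the repo hosts only GGUF files, this load would fail at startup, which may be related to the Space's "Runtime error" status.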
@@ -68,10 +65,51 @@ def generate(
     yield "", conversation
 
 
-#
-
+# Custom CSS to match your color scheme and fonts
+custom_css = """
+body {
+    background-color: #333333;
+    color: #f0f0f0;
+    font-family: 'Exo 2', system-ui, sans-serif;
+}
+.gradio-container {
+    background-color: #333333;
+}
+.gr-button {
+    background-color: #7dfa00 !important;
+    color: #333333 !important;
+    border: none !important;
+}
+.gr-button:hover {
+    background-color: #5fed00 !important;
+}
+.gr-input, .gr-textarea {
+    background-color: #444444 !important;
+    border-color: #7dfa00 !important;
+    color: #f0f0f0 !important;
+}
+.gr-form {
+    background-color: #444444 !important;
+    border-color: #7dfa00 !important;
+}
+.gr-box {
+    background-color: #444444 !important;
+    border-color: #7dfa00 !important;
+}
+.gr-padded {
+    background-color: #444444 !important;
+}
+h1, h2, h3 {
+    font-family: 'Fraunces', system-ui, serif;
+    color: #7dfa00;
+}
+"""
+
+# Updated placeholder HTML
+PLACEHOLDER = """
+<div style="padding: 20px; text-align: center; display: flex; flex-direction: column; align-items: center; background-color: #333333; color: #f0f0f0; font-family: 'Exo 2', system-ui, sans-serif;">
     <img src="https://corladlalibertad.org.pe/wp-content/uploads/2024/01/USS.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; margin-bottom: 10px;">
-    <h1 style="font-size: 28px; margin: 0;">SipánGPT 0.2 Llama 3.2</h1>
+    <h1 style="font-size: 28px; margin: 0; font-family: 'Fraunces', system-ui, serif; color: #7dfa00;">SipánGPT 0.2 Llama 3.2</h1>
     <p style="font-size: 8px; margin: 5px 0 0; opacity: 0.65;">
         <a href="https://huggingface.co/spaces/ysharma/Llama3-2_with_Gradio-5" target="_blank" style="color: inherit; text-decoration: none;">Forked from @ysharma</a>
     </p>
@@ -80,7 +118,8 @@ PLACEHOLDER = """<div style="padding: 20px; text-align: center; display: flex; f
         <a href="https://huggingface.co/datasets/ussipan/sipangpt" target="_blank" style="color: inherit; text-decoration: none;">Ver el dataset aquí</a>
     </p>
     <p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">Entrenado con un dataset de 5.4k conversaciones.</p>
-</div>"""
+</div>
+"""
 
 def handle_retry(history, retry_data: gr.RetryData):
     new_history = history[:retry_data.index]
@@ -101,34 +140,55 @@ def handle_undo(history, undo_data: gr.UndoData):
 def chat_examples_fill(data: gr.SelectData):
     yield from generate(data.value['text'], chat_history = [], max_new_tokens = 1024, temperature = 0.6, top_p = 0.9, top_k = 50, repetition_penalty = 1.2)
 
+# Create a custom theme
+custom_theme = gr.themes.Base().set(
+    body_background_fill="#333333",
+    body_background_fill_dark="#333333",
+    body_text_color="#f0f0f0",
+    body_text_color_dark="#f0f0f0",
+    color_primary="#7dfa00",
+    background_fill_primary="#444444",
+    background_fill_secondary="#333333",
+    border_color_primary="#7dfa00",
+    button_primary_background_fill="#7dfa00",
+    button_primary_background_fill_dark="#7dfa00",
+    button_primary_text_color="#333333",
+    button_primary_text_color_dark="#333333",
+    input_background_fill="#444444",
+    input_background_fill_dark="#444444",
+    input_border_color="#7dfa00",
+    input_border_color_dark="#7dfa00",
+    input_placeholder_color="#bebebe",
+    input_placeholder_color_dark="#bebebe",
+)
 
-with gr.Blocks(theme=
+with gr.Blocks(theme=custom_theme, css=custom_css) as demo:
     with gr.Column(elem_id="container", scale=1):
         chatbot = gr.Chatbot(
             label="SipánGPT 0.2 Llama 3.2",
             show_label=False,
             type="messages",
             scale=1,
-            suggestions
+            suggestions=[
                 {"text": "Háblame del reglamento de estudiantes de la universidad"},
                 {"text": "Qué becas ofrece la universidad"},
-
-            placeholder
-
+            ],
+            placeholder=PLACEHOLDER,
+        )
 
         msg = gr.Textbox(submit_btn=True, show_label=False)
         with gr.Accordion('Additional inputs', open=False):
-            max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS
-            temperature = gr.Slider(label="Temperature",minimum=0.1, maximum=4.0, step=0.1, value=0.6
-            top_p = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9
-            top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50
-            repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2
+            max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
+            temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6)
+            top_p = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9)
+            top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
+            repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)
 
         msg.submit(generate, [msg, chatbot, max_new_tokens, temperature, top_p, top_k, repetition_penalty], [msg, chatbot])
         chatbot.retry(handle_retry, chatbot, [msg, chatbot])
         chatbot.like(handle_like, None, None)
         chatbot.undo(handle_undo, chatbot, [chatbot, msg])
-        chatbot.suggestion_select(chat_examples_fill, None, [msg, chatbot]
+        chatbot.suggestion_select(chat_examples_fill, None, [msg, chatbot])
 
 
 demo.launch()
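Note: the core of this commit is the custom_theme/custom_css pair passed to gr.Blocks. This follows the standard Gradio pattern: a theme object built with gr.themes.Base().set(...) controls component-level variables, while the css= string covers page-level overrides the theme variables don't reach. A minimal self-contained sketch of the same pattern (the colors mirror the diff; the components are illustrative):

import gradio as gr

# Theme variables for component styling; mirrors the palette in the diff.
theme = gr.themes.Base().set(
    body_background_fill="#333333",
    body_text_color="#f0f0f0",
    button_primary_background_fill="#7dfa00",
    button_primary_text_color="#333333",
)

# Raw CSS for anything the theme variables don't cover.
css = """
.gradio-container { background-color: #333333; }
h1, h2, h3 { color: #7dfa00; }
"""

with gr.Blocks(theme=theme, css=css) as demo:
    gr.Markdown("# Themed demo")
    gr.Button("Primary action", variant="primary")

demo.launch()

One thing to verify against the Gradio version pinned in the Space: .set() accepts only recognized theme variable names and rejects unknown ones, so a key like color_primary in the diff is worth double-checking against the gr.themes documentation.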
|