Update app.py
app.py CHANGED
@@ -425,56 +425,59 @@ def chat_rag(
# Build the Gradio interface with tabs.
-[old lines 428-478: previous "with gr.Blocks(...)" interface definition; body not captured in this rendering]
+with gr.Blocks(css="""
+body {background-color: #f5f5f5; font-family: Arial, sans-serif;}
+.gradio-container {max-width: 1000px; margin: auto; background: white; padding: 20px; border-radius: 10px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);}
+h1 {color: #333; text-align: center; font-size: 2rem;}
+h2 {color: #444; margin-top: 10px; font-size: 1.5rem;}
+.gr-tab {padding: 10px;}
+""") as demo:
+
+    gr.Markdown("# 🚀 QLoRA Fine-tuning & RAG Chat Demo")
+    gr.Markdown("Welcome to the enhanced **QLoRA fine-tuning and RAG-based chatbot interface**. This tool lets you fine-tune an AI model, generate text, and interact with a chatbot using retrieval-augmented responses.")
+
+    with gr.Tabs():
+
+        # Fine-tuning tab
+        with gr.Tab(label="⚙️ Fine-tune Model"):
+            gr.Markdown("### Train your custom R1 model")
+            gr.Markdown("Fine-tune the model using QLoRA. This is **optional**, but recommended for better performance.")
+            finetune_btn = gr.Button("Start Fine-tuning")
+            finetune_output = gr.Textbox(label="Status", interactive=False)
+            finetune_btn.click(finetune_small_subset, inputs=None, outputs=finetune_output)
+
+        # Text Generation tab
+        with gr.Tab(label="✍️ Text Generation"):
+            gr.Markdown("### Generate text using your fine-tuned model")
+            input_prompt = gr.Textbox(label="Enter Prompt", placeholder="Type something here...", lines=3)
+            temp_slider = gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Temperature")
+            topp_slider = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p")
+            min_tokens = gr.Slider(1, 1000, value=50, step=10, label="Min New Tokens")
+            max_tokens = gr.Slider(1, 1000, value=200, step=10, label="Max New Tokens")
+            generate_btn = gr.Button("Generate Text")
+            output_box = gr.Textbox(label="Generated Output", lines=8, interactive=False)
+            generate_btn.click(predict, inputs=[input_prompt, temp_slider, topp_slider, min_tokens, max_tokens], outputs=output_box)
+
+        # Model Comparison tab
+        with gr.Tab(label="🆚 Compare Models"):
+            gr.Markdown("### Compare text outputs from your fine-tuned model and the official model")
+            compare_prompt = gr.Textbox(label="Enter Comparison Prompt", placeholder="Enter a prompt here...", lines=3)
+            compare_temp = gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Temperature")
+            compare_topp = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p")
+            compare_min_tokens = gr.Slider(1, 1000, value=50, step=10, label="Min New Tokens")
+            compare_max_tokens = gr.Slider(1, 1000, value=200, step=10, label="Max New Tokens")
+            compare_btn = gr.Button("Compare Models")
+            compare_output1 = gr.Textbox(label="Custom Model Output", lines=6, interactive=False)
+            compare_output2 = gr.Textbox(label="Official Model Output", lines=6, interactive=False)
+            compare_btn.click(compare_models, inputs=[compare_prompt, compare_temp, compare_topp, compare_min_tokens, compare_max_tokens], outputs=[compare_output1, compare_output2])
+
+        # Chatbot tab
+        with gr.Tab(label="💬 AI Chatbot"):
+            gr.Markdown("### Chat with an AI assistant using retrieval-augmented generation (RAG)")
+            chatbot = gr.Chatbot(label="AI Chatbot", height=400)
+            chat_input = gr.Textbox(placeholder="Ask me anything...", lines=2)
+            chat_btn = gr.Button("Send")
+            chat_output = gr.Chatbot(label="Chat History")
+            # Reuses the sampling sliders defined in the Text Generation tab.
+            chat_btn.click(chat_rag, inputs=[chat_input, chatbot, temp_slider, topp_slider, min_tokens, max_tokens], outputs=[chat_output, chatbot])

demo.launch()
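
A note on the tab container in the added code: in Gradio's Blocks API, gr.TabbedInterface is a standalone constructor that takes a list of Interface objects plus tab names and is not usable as a context manager, so the block above is reconstructed with gr.Tabs() wrapping the individual gr.Tab sections. A minimal sketch of that pattern, with placeholder labels and components rather than the app's real content:

import gradio as gr

with gr.Blocks() as sketch_demo:
    with gr.Tabs():                              # layout container for the tabs below
        with gr.Tab(label="First Tab"):
            gr.Markdown("Components for the first tab go here.")
        with gr.Tab(label="Second Tab"):
            gr.Markdown("Components for the second tab go here.")

# By contrast, gr.TabbedInterface is assembled from existing Interface objects:
# tabbed = gr.TabbedInterface([iface_a, iface_b], ["Tab A", "Tab B"])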
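
On the event wiring: Gradio passes the current values of the components listed in inputs to the handler as positional arguments and maps the handler's return values onto the components in outputs. The click calls above therefore imply roughly the following shapes for predict and chat_rag; these signatures are assumptions inferred from the inputs/outputs lists, not copied from the rest of app.py:

# Assumed handler shapes only; the real implementations live elsewhere in app.py.
def predict(prompt, temperature, top_p, min_new_tokens, max_new_tokens):
    # ... generate text with the fine-tuned model ...
    return "generated text"                      # one value -> output_box

def chat_rag(message, history, temperature, top_p, min_new_tokens, max_new_tokens):
    # ... retrieve context, generate a reply, append it to the history ...
    # Assumes the Chatbot's tuple-pair history format.
    history = (history or []) + [(message, "assistant reply")]
    return history, history                      # two values -> [chat_output, chatbot]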