Create app.py
app.py
ADDED
@@ -0,0 +1,75 @@
import torch
import gradio as gr
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Load model and tokenizer
def load_model():
    model_name = "zeyadusf/text2pandas-T5"
    tokenizer = T5Tokenizer.from_pretrained(model_name)
    model = T5ForConditionalGeneration.from_pretrained(model_name)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    return model, tokenizer

model, tokenizer = load_model()

# Generate Pandas code from a question and a DataFrame context
def generate_text(question, context, max_length=512, num_beams=4, early_stopping=True):
    max_length = int(max_length)  # Gradio sliders return floats; the tokenizer and generate() expect ints
    num_beams = int(num_beams)
    input_text = f"<question> {question} <context> {context}"
    inputs = tokenizer.encode(input_text, return_tensors="pt", truncation=True, max_length=max_length).to(model.device)

    with torch.no_grad():
        outputs = model.generate(inputs, max_length=max_length, num_beams=num_beams, early_stopping=early_stopping)

    predicted_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return predicted_text

# Gradio interface callback
def gradio_interface(question, context, max_length, num_beams, early_stopping):
    return generate_text(question, context, max_length, num_beams, early_stopping)

# Gradio UI components (the old gr.inputs.* namespace and default= are deprecated;
# current Gradio uses top-level components with value=)
question_input = gr.Textbox(label="Enter the Question", value="what is the total amount of players for the rockets in 1998 only?")
context_input = gr.Textbox(label="Enter the Context", value="df = pd.DataFrame(columns=['player', 'years_for_rockets'])")
max_length_input = gr.Slider(minimum=50, maximum=1024, value=512, step=1, label="Max Length")
num_beams_input = gr.Slider(minimum=1, maximum=10, value=4, step=1, label="Number of Beams")
early_stopping_input = gr.Checkbox(value=True, label="Early Stopping")

# Custom CSS to style the slider, checkbox, and center the button
custom_css = """
/* Make the slider handle and bar light green */
input[type="range"] {
    accent-color: lightgreen;
}
input[type="range"]::-webkit-slider-thumb {
    background-color: #90EE90; /* Light green slider thumb */
}
input[type="range"]::-webkit-slider-runnable-track {
    background-color: #32CD32; /* Light green slider track */
}

/* Make the checkbox light green */
input[type="checkbox"] {
    accent-color: lightgreen;
}

/* Center the button */
.gr-button.gr-button-primary {
    display: block;
    margin: 0 auto;
    background-color: #90EE90; /* Light green button */
    color: black;
    border-radius: 8px;
    border: 2px solid #006400; /* Dark green border */
}
"""

# Create and launch the Gradio interface
gr.Interface(
    fn=gradio_interface,
    inputs=[question_input, context_input, max_length_input, num_beams_input, early_stopping_input],
    outputs="text",
    title="Text to Pandas Code Generator",
    description="Generate Pandas code by providing a question and a context.",
    css=custom_css,  # Apply the custom CSS
).launch()
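
For a quick check outside the Gradio UI, generate_text can be called directly with the same prompt format used above. The snippet below is a minimal sketch that assumes the model and tokenizer from app.py are already loaded in the current session; the question, context, and the expected output description are illustrative, not guaranteed model behavior.

# Example: calling generate_text directly (assumes app.py has been run or imported)
question = "what is the total amount of players for the rockets in 1998 only?"
context = "df = pd.DataFrame(columns=['player', 'years_for_rockets'])"

pandas_code = generate_text(question, context, max_length=512, num_beams=4, early_stopping=True)
print(pandas_code)  # Prints the generated Pandas expression over df (e.g. a filtered count; exact output depends on the model)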