import os
import subprocess

import gradio as gr
import spaces
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

# Install flash-attn at runtime. FLASH_ATTENTION_SKIP_CUDA_BUILD skips compiling
# the CUDA kernels during the install (a common workaround on Hugging Face Spaces);
# the existing environment is preserved so pip stays on PATH.
subprocess.run(
    'pip install flash-attn --no-build-isolation',
    env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"},
    shell=True,
)

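# Load the model and processor once at startup; the model stays on the CPU
# until a request arrives. num_crops controls how many crops the processor
# takes from each input image.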
model_id = "microsoft/Phi-3.5-vision-instruct"
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    _attn_implementation="eager",  # use "flash_attention_2" here if flash-attn is available
)
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True, num_crops=16)


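# spaces.GPU requests a GPU for this call (up to 120 seconds) when running on
# ZeroGPU hardware.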
@spaces.GPU(duration=120)
def solve_math_problem(image):
    if image is None:
        return "Please upload an image of a math problem first."

    # Move the model onto the GPU for the duration of this request.
    model.to('cuda')

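    # Build the chat prompt; <|image_1|> marks where the uploaded image is injected.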
    messages = [
        {"role": "user", "content": "<|image_1|>\nSolve this math problem step by step. Explain your reasoning clearly."},
    ]
    prompt = processor.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

    # The image is passed as a list, matching the processor usage in the model card.
    inputs = processor(prompt, [image], return_tensors="pt").to("cuda")

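    # Sampling with a low temperature keeps the explanation focused without
    # falling back to purely greedy decoding.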
    generation_args = {
        "max_new_tokens": 1000,
        "temperature": 0.2,
        "do_sample": True,
    }
    generate_ids = model.generate(
        **inputs,
        eos_token_id=processor.tokenizer.eos_token_id,
        **generation_args,
    )

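    # Drop the prompt tokens so only the newly generated solution is decoded.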
    generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
    response = processor.batch_decode(
        generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]

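    # Move the model back to the CPU so the GPU can be released between requests.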
    model.to('cpu')
    return response


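# Custom styling for the interface. Gradio's `css` argument expects raw CSS,
# so the rules below are not wrapped in <style> tags.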
custom_css = """ |
|
<style> |
|
body {
    font-family: 'Arial', sans-serif;
    background-color: #f0f3f7;
    margin: 0;
    padding: 0;
}
.container {
    max-width: 1200px;
    margin: 0 auto;
    padding: 20px;
}
.header {
    background-color: #2c3e50;
    color: white;
    padding: 20px 0;
    text-align: center;
}
.header h1 {
    margin: 0;
    font-size: 2.5em;
}
.main-content {
    display: flex;
    justify-content: space-between;
    margin-top: 30px;
}
.input-section, .output-section {
    width: 48%;
    background-color: white;
    border-radius: 8px;
    padding: 20px;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
.gr-button {
    background-color: #27ae60;
    color: white;
    border: none;
    padding: 10px 20px;
    border-radius: 5px;
    cursor: pointer;
    transition: background-color 0.3s;
}
.gr-button:hover {
    background-color: #2ecc71;
}
.examples-section {
    margin-top: 30px;
    background-color: white;
    border-radius: 8px;
    padding: 20px;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
.examples-section h3 {
    margin-top: 0;
    color: #2c3e50;
}
.footer {
    text-align: center;
    margin-top: 30px;
    color: #7f8c8d;
}
"""

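# Build the interface: image upload and solve button on the left, the
# step-by-step solution on the right, example problems below, and a footer.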
with gr.Blocks(css=custom_css) as iface:
    gr.HTML("""
    <div class="header">
        <h1>AI Math Equation Solver</h1>
        <p>Upload an image of a math problem, and our AI will solve it step by step!</p>
    </div>
    """)

    with gr.Row(equal_height=True):
        with gr.Column():
            gr.HTML("<h2>Upload Your Math Problem</h2>")
            input_image = gr.Image(type="pil", label="Upload Math Problem Image")
            submit_btn = gr.Button("Solve Problem", elem_classes=["gr-button"])

        with gr.Column():
            gr.HTML("<h2>Solution</h2>")
            output_text = gr.Textbox(label="Step-by-step Solution", lines=10)

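    # Example images are expected alongside this script; with cache_examples=True
    # their solutions are generated once at startup and reused.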
gr.HTML("<h3>Try These Examples</h3>") |
|
examples = gr.Examples( |
|
examples=[ |
|
os.path.join(os.path.dirname(__file__), "eqn1.png"), |
|
os.path.join(os.path.dirname(__file__), "eqn2.png") |
|
], |
|
inputs=input_image, |
|
outputs=output_text, |
|
fn=solve_math_problem, |
|
cache_examples=True, |
|
) |
|
|
|
gr.HTML(""" |
|
<div class="footer"> |
|
<p>Powered by Gradio and AI - Created for educational purposes</p> |
|
</div> |
|
""") |
|
|
|
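    # Wire the button to the inference function.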
    submit_btn.click(fn=solve_math_problem, inputs=input_image, outputs=output_text)

iface.launch()