sagar007 committed on
Commit 9986a01 · verified · 1 Parent(s): 10dacc2

Update app.py

Files changed (1): app.py (+143 -70)
app.py CHANGED
@@ -1,84 +1,157 @@
  import gradio as gr
- from transformers import AutoModelForCausalLM, AutoProcessor, pipeline
  from PIL import Image
  import torch
- import warnings

- # Suppress warnings
- warnings.filterwarnings("ignore")

- # Load Phi-3.5-vision model
- phi_model_id = "microsoft/Phi-3.5-vision-instruct"
- try:
-     phi_model = AutoModelForCausalLM.from_pretrained(
-         phi_model_id,
-         device_map="auto",
-         trust_remote_code=True,
-         torch_dtype=torch.float16,  # Use float16 to reduce memory usage
-         _attn_implementation="eager"  # Fall back to eager implementation if flash attention is not available
-     )
- except ImportError:
-     print("FlashAttention not available, falling back to eager implementation.")
-     phi_model = AutoModelForCausalLM.from_pretrained(
-         phi_model_id,
-         device_map="auto",
-         trust_remote_code=True,
-         torch_dtype=torch.float16,
-         _attn_implementation="eager"
      )
-
- phi_processor = AutoProcessor.from_pretrained(phi_model_id, trust_remote_code=True)
-
- # Load Llama 3.1 model
- llama_model_id = "meta-llama/Llama-3.1-8B"
- try:
-     llama_pipeline = pipeline("text-generation", model=llama_model_id, device_map="auto", torch_dtype=torch.float16)
- except Exception as e:
-     print(f"Error loading Llama 3.1 model: {e}")
-     print("Falling back to a smaller, open-source model.")
-     llama_model_id = "gpt2"  # Fallback to a smaller, open-source model
-     llama_pipeline = pipeline("text-generation", model=llama_model_id, device_map="auto")
-
- def analyze_image(image, query):
-     prompt = f"<|user|>\n<|image_1|>\n{query}<|end|>\n<|assistant|>\n"
-     inputs = phi_processor(prompt, images=image, return_tensors="pt").to(phi_model.device)

-     with torch.no_grad():
-         output = phi_model.generate(**inputs, max_new_tokens=100)
-     return phi_processor.decode(output[0], skip_special_tokens=True)
-
- def generate_text(query, history):
-     context = "\n".join([f"{h[0]}\n{h[1]}" for h in history])
-     prompt = f"{context}\nHuman: {query}\nAI:"

-     response = llama_pipeline(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)[0]['generated_text']
-     return response.split("AI:")[-1].strip()
-
- def chatbot(image, query, history):
-     if image is not None:
-         response = analyze_image(Image.fromarray(image), query)
-     else:
-         response = generate_text(query, history)

-     history.append((query, response))
-     return "", history, history
-
- with gr.Blocks() as demo:
-     gr.Markdown("# Multi-Modal AI Assistant")
-
-     with gr.Row():
-         image_input = gr.Image(type="numpy", label="Upload an image (optional)")
-         chat_history = gr.Chatbot(label="Chat History")

-     query_input = gr.Textbox(label="Ask a question or enter a prompt")
-     submit_button = gr.Button("Submit")

-     state = gr.State([])

-     submit_button.click(
-         chatbot,
-         inputs=[image_input, query_input, state],
-         outputs=[query_input, chat_history, state]
      )

- demo.launch()
 
 
  import gradio as gr
+ import spaces
  from PIL import Image
+ import os
  import torch
+ from transformers import AutoModelForCausalLM, AutoProcessor

+ # Load the model and processor
+ model_id = "microsoft/Phi-3.5-vision-instruct"
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     trust_remote_code=True,
+     torch_dtype=torch.float16,
+     use_flash_attention_2=False,  # Explicitly disable Flash Attention 2
+ )
+ processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True, num_crops=16)

+ @spaces.GPU(duration=120)
+ def solve_math_problem(image):
+     # Move model to GPU for this function call
+     model.to('cuda')
+
+     # Prepare the input
+     messages = [
+         {"role": "user", "content": "<|image_1|>\nSolve this math problem step by step. Explain your reasoning clearly."},
+     ]
+     prompt = processor.tokenizer.apply_chat_template(
+         messages, tokenize=False, add_generation_prompt=True
      )

+     # Process the input
+     inputs = processor(prompt, image, return_tensors="pt").to("cuda")

+     # Generate the response
+     generation_args = {
+         "max_new_tokens": 1000,
+         "temperature": 0.2,
+         "do_sample": True,
+     }
+     generate_ids = model.generate(**inputs, eos_token_id=processor.tokenizer.eos_token_id, **generation_args)

+     # Decode the response
+     generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
+     response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]

+     # Move model back to CPU to free up GPU memory
+     model.to('cpu')
+     return response
+
+ # Custom CSS
+ custom_css = """
+ <style>
+ body {
+     font-family: 'Arial', sans-serif;
+     background-color: #f0f3f7;
+     margin: 0;
+     padding: 0;
+ }
+ .container {
+     max-width: 1200px;
+     margin: 0 auto;
+     padding: 20px;
+ }
+ .header {
+     background-color: #2c3e50;
+     color: white;
+     padding: 20px 0;
+     text-align: center;
+ }
+ .header h1 {
+     margin: 0;
+     font-size: 2.5em;
+ }
+ .main-content {
+     display: flex;
+     justify-content: space-between;
+     margin-top: 30px;
+ }
+ .input-section, .output-section {
+     width: 48%;
+     background-color: white;
+     border-radius: 8px;
+     padding: 20px;
+     box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+ }
+ .gr-button {
+     background-color: #27ae60;
+     color: white;
+     border: none;
+     padding: 10px 20px;
+     border-radius: 5px;
+     cursor: pointer;
+     transition: background-color 0.3s;
+ }
+ .gr-button:hover {
+     background-color: #2ecc71;
+ }
+ .examples-section {
+     margin-top: 30px;
+     background-color: white;
+     border-radius: 8px;
+     padding: 20px;
+     box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+ }
+ .examples-section h3 {
+     margin-top: 0;
+     color: #2c3e50;
+ }
+ .footer {
+     text-align: center;
+     margin-top: 30px;
+     color: #7f8c8d;
+ }
+ </style>
+ """
+
+ # Create the Gradio interface
+ with gr.Blocks(css=custom_css) as iface:
+     gr.HTML("""
+     <div class="header">
+         <h1>AI Math Equation Solver</h1>
+         <p>Upload an image of a math problem, and our AI will solve it step by step!</p>
+     </div>
+     """)

+     with gr.Row(equal_height=True):
+         with gr.Column():
+             gr.HTML("<h2>Upload Your Math Problem</h2>")
+             input_image = gr.Image(type="pil", label="Upload Math Problem Image")
+             submit_btn = gr.Button("Solve Problem", elem_classes=["gr-button"])
+
+         with gr.Column():
+             gr.HTML("<h2>Solution</h2>")
+             output_text = gr.Textbox(label="Step-by-step Solution", lines=10)

+     gr.HTML("<h3>Try These Examples</h3>")
+     examples = gr.Examples(
+         examples=[
+             os.path.join(os.path.dirname(__file__), "eqn1.png"),
+             os.path.join(os.path.dirname(__file__), "eqn2.png")
+         ],
+         inputs=input_image,
+         outputs=output_text,
+         fn=solve_math_problem,
+         cache_examples=True,
      )
+
+     gr.HTML("""
+     <div class="footer">
+         <p>Powered by Gradio and AI - Created for educational purposes</p>
+     </div>
+     """)
+
+     submit_btn.click(fn=solve_math_problem, inputs=input_image, outputs=output_text)

+ # Launch the app
+ iface.launch()
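
As a closing note, a minimal sketch of how the new solve_math_problem function could be smoke-tested by hand, for example by temporarily appending it to app.py above the iface.launch() call. It assumes the Space's runtime (the spaces package installed and a CUDA device available); "my_equation.png" is a hypothetical local file, not part of this repo.

# Hedged smoke test (not part of the commit): call the solver directly,
# bypassing the Gradio UI. Assumes a CUDA GPU and the `spaces` package,
# and that it runs where model, processor, and solve_math_problem are defined.
from PIL import Image

test_image = Image.open("my_equation.png")   # hypothetical image of a math problem
solution = solve_math_problem(test_image)    # returns the decoded step-by-step text
print(solution)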