Spaces: Runtime error
Update app.py
app.py
CHANGED
@@ -1,57 +1,41 @@
-import gradio as gr
-
 from PIL import Image
-import os
-import tempfile
 import torch
-from ...
-import ...

-# ...
-...
    model_id,
    trust_remote_code=True,
-    torch_dtype=torch.float16,
-...
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True, num_crops=16)
-
-def ...
-    ...
-    os.makedirs(uploaded_file_dir, exist_ok=True)
-    # saves the uploaded image as a temporary file
-    name = f"tmp{secrets.token_hex(20)}.jpg"
-    filename = os.path.join(uploaded_file_dir, name)
-    # If the input was a sketch then convert into RGB format
-    if should_convert:
-        new_img = Image.new('RGB', size=(image.width, image.height), color=(255, 255, 255))
-        new_img.paste(image, (0, 0), mask=image)
-        image = new_img
-    # Saves the image in the temporary file
-    image.save(filename)
-    # Calling the model to process images
-    messages = [{
-        'role': 'system',
-        'content': [{'text': 'You are a helpful assistant.'}]
-    }, {
-        'role': 'user',
-        'content': [
-            {'image': f'file://{filename}'},
-            {'text': 'Please describe the math-related content in this image, ensuring that any LaTeX formulas are correctly transcribed. Non-mathematical details do not need to be described.'}
-        ]
-    }]
    prompt = processor.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    # Process the input
-    inputs = processor(prompt, image, return_tensors="pt")

    # Generate the response
    generation_args = {
@@ -64,115 +48,116 @@ def process_image(image,should_convert=False):
    # Decode the response
    generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
    response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
    return response

-# ...
-...
"""

-...
-    state = gr.State({"tab_index": 0})
-    with gr.Row():
        with gr.Column():
-            ...
-            input_sketchpad = gr.Sketchpad(type="pil", label="Sketch", layers=False)
-            input_tabs.select(fn=tabs_select, inputs=[state])
-            input_text = gr.Textbox(label="input your question")
-            with gr.Row():
-                with gr.Column():
-                    clear_btn = gr.ClearButton(
-                        [*input_image, input_sketchpad, input_text])
-                with gr.Column():
-                    submit_btn = gr.Button("Submit", variant="primary")
        with gr.Column():
-            ...
-            }, {
-                "left": "\\[",
-                "right": "\\]",
-                "display": True
-            }],
-            elem_id="qwen-md")
-    submit_btn.click(
-        fn=math_chat_bot,
-        inputs=[*input_image, input_sketchpad, input_text, state],
-        outputs=output_md)
-demo.launch()
+import gradio as gr
+import spaces
 from PIL import Image
+import os
 import torch
+from transformers import AutoModelForCausalLM, AutoProcessor
+import subprocess
+from io import BytesIO

+# Install flash-attn
+subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
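A side note on the install step above: passing env={...} to subprocess.run replaces the whole environment rather than extending it, which can leave pip without its usual PATH. A more defensive variant (a sketch only, not part of this commit) merges the flag into the existing environment and skips the install when flash-attn is already importable:

# Sketch only: same install step, but preserving the existing environment
# and skipping the install if flash_attn is already importable.
import os
import subprocess

try:
    import flash_attn  # noqa: F401
except ImportError:
    subprocess.run(
        'pip install flash-attn --no-build-isolation',
        env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': 'TRUE'},
        shell=True,
        check=False,
    )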
+
+
+# Load the model and processor
+model_id = "microsoft/Phi-3.5-vision-instruct"
+model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
+    torch_dtype=torch.float16,
+    use_flash_attention_2=False,  # Explicitly disable Flash Attention 2
+)
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True, num_crops=16)
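On newer transformers releases the use_flash_attention_2 argument is deprecated; the Phi-3.5-vision model card selects the attention backend with _attn_implementation instead. A hedged alternative for the same load call (not what this commit does):

# Sketch: equivalent load with the attention backend chosen explicitly,
# as in the microsoft/Phi-3.5-vision-instruct sample code.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    _attn_implementation="eager",  # or "flash_attention_2" once flash-attn is installed
)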
+
+@spaces.GPU(duration=120)
+def solve_math_problem(image):
+    # Move model to GPU for this function call
+    model.to('cuda')
+
+    # Prepare the input
+    messages = [
+        {"role": "user", "content": "<|image_1|>\nSolve this math problem step by step. Explain your reasoning clearly."},
+    ]
    prompt = processor.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
+
    # Process the input
+    inputs = processor(prompt, image, return_tensors="pt").to("cuda")

    # Generate the response
    generation_args = {
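The hunk hides the unchanged generation call (lines 42-47 of the new file). For context, a typical generation block for this model looks roughly like the following; the concrete values in app.py are not shown in this diff, so treat these as illustrative:

# Illustrative only; the actual unchanged lines are not part of this diff.
generation_args = {
    "max_new_tokens": 1000,
    "temperature": 0.0,
    "do_sample": False,
}
generate_ids = model.generate(
    **inputs,
    eos_token_id=processor.tokenizer.eos_token_id,
    **generation_args,
)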
    # Decode the response
    generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
    response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+
+    # Move model back to CPU to free up GPU memory
+    model.to('cpu')
    return response

+# Custom CSS
+custom_css = """
+<style>
+body {
+    font-family: 'Arial', sans-serif;
+    background-color: #f0f3f7;
+    margin: 0;
+    padding: 0;
+}
+.container {
+    max-width: 1200px;
+    margin: 0 auto;
+    padding: 20px;
+}
+.header {
+    background-color: #2c3e50;
+    color: white;
+    padding: 20px 0;
+    text-align: center;
+}
+.header h1 {
+    margin: 0;
+    font-size: 2.5em;
+}
+.main-content {
+    display: flex;
+    justify-content: space-between;
+    margin-top: 30px;
+}
+.input-section, .output-section {
+    width: 48%;
+    background-color: white;
+    border-radius: 8px;
+    padding: 20px;
+    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+}
+.gr-button {
+    background-color: #27ae60;
+    color: white;
+    border: none;
+    padding: 10px 20px;
+    border-radius: 5px;
+    cursor: pointer;
+    transition: background-color 0.3s;
+}
+.gr-button:hover {
+    background-color: #2ecc71;
+}
+.examples-section {
+    margin-top: 30px;
+    background-color: white;
+    border-radius: 8px;
+    padding: 20px;
+    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+}
+.examples-section h3 {
+    margin-top: 0;
+    color: #2c3e50;
+}
+.footer {
+    text-align: center;
+    margin-top: 30px;
+    color: #7f8c8d;
+}
+</style>
"""

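One caveat about the block above: gr.Blocks(css=...) expects plain CSS rules, so the <style> wrapper is likely to end up as literal text inside the injected style sheet rather than as markup. A minimal sketch of the same idea without the wrapper (assumed fix, not part of this commit):

# Sketch: pass bare CSS rules to gr.Blocks(css=...), without <style> tags.
custom_css = """
body { font-family: 'Arial', sans-serif; background-color: #f0f3f7; margin: 0; padding: 0; }
.gr-button { background-color: #27ae60; color: white; border: none; padding: 10px 20px; }
"""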
+# Create the Gradio interface
+with gr.Blocks(css=custom_css) as iface:
+    gr.HTML("""
+    <div class="header">
+        <h1>AI Math Equation Solver</h1>
+        <p>Upload an image of a math problem, and our AI will solve it step by step!</p>
+    </div>
+    """)
+
+    with gr.Row(equal_height=True):
        with gr.Column():
+            gr.HTML("<h2>Upload Your Math Problem</h2>")
+            input_image = gr.Image(type="pil", label="Upload Math Problem Image")
+            submit_btn = gr.Button("Solve Problem", elem_classes=["gr-button"])
+
        with gr.Column():
+            gr.HTML("<h2>Solution</h2>")
+            output_text = gr.Textbox(label="Step-by-step Solution", lines=10)
+
+    gr.HTML("<h3>Try These Examples</h3>")
+    examples = gr.Examples(
+        examples=[
+            os.path.join(os.path.dirname(__file__), "eqn1.png"),
+            os.path.join(os.path.dirname(__file__), "eqn2.png")
+        ],
+        inputs=input_image,
+        outputs=output_text,
+        fn=solve_math_problem,
+        cache_examples=True,
+    )
+
+    gr.HTML("""
+    <div class="footer">
+        <p>Powered by Gradio and AI - Created for educational purposes</p>
+    </div>
+    """)
+
+    submit_btn.click(fn=solve_math_problem, inputs=input_image, outputs=output_text)
+
+# Launch the app
+iface.launch()
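For a quick check of the handler outside the Space (assuming a CUDA GPU, the packages imported above, and one of the bundled example images in the working directory), solve_math_problem can be called directly; the snippet below is illustrative and not part of the commit:

# Illustrative local smoke test; "eqn1.png" is the example image referenced above.
from PIL import Image

img = Image.open("eqn1.png")
print(solve_math_problem(img))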