Commit · 2ffa822
1 Parent(s): 36e014d
Update

app.py CHANGED
@@ -8,7 +8,7 @@ import uuid
 from PIL import Image
 from typing import List, Dict, Any, Iterator
 import gradio as gr
-
+from gradio import ChatMessage
 
 # Add the project root to the Python path
 current_dir = os.path.dirname(os.path.abspath(__file__))
@@ -21,11 +21,11 @@ from opentools.models.memory import Memory
 from opentools.models.executor import Executor
 from opentools.models.utils import make_json_serializable
 
-class ChatMessage:
-    def __init__(self, role: str, content: str, metadata: dict = None):
-        self.role = role
-        self.content = content
-        self.metadata = metadata or {}
+# class ChatMessage:
+#     def __init__(self, role: str, content: str, metadata: dict = None):
+#         self.role = role
+#         self.content = content
+#         self.metadata = metadata or {}
 
 class Solver:
     def __init__(
@@ -90,19 +90,18 @@ class Solver:
         self.executor.set_query_cache_dir(_cache_dir)
 
         # Step 1: Display the received inputs
-        import pdb; pdb.set_trace()
         if user_image:
             messages.append(ChatMessage(role="assistant", content=f"📝 Received Query: {user_query}\n🖼️ Image Uploaded"))
         else:
             messages.append(ChatMessage(role="assistant", content=f"📝 Received Query: {user_query}"))
         yield messages
 
-        # Step 2: Add "thinking" status while processing
-        messages.append(ChatMessage(
-            role="assistant",
-            content="",
-            metadata={"title": "⏳ Thinking: Processing input..."}
-        ))
+        # # Step 2: Add "thinking" status while processing
+        # messages.append(ChatMessage(
+        #     role="assistant",
+        #     content="",
+        #     metadata={"title": "⏳ Thinking: Processing input..."}
+        # ))
 
         # Step 3: Initialize problem-solving state
         start_time = time.time()
@@ -112,13 +111,17 @@ class Solver:
         # Step 4: Query Analysis
         query_analysis = self.planner.analyze_query(user_query, img_path)
         json_data["query_analysis"] = query_analysis
-        messages.append(ChatMessage(role="assistant", content=f"{query_analysis}", metadata={"title": "🔍 Query Analysis"}))
+        messages.append(ChatMessage(role="assistant",
+                                    content=f"{query_analysis}",
+                                    metadata={"title": "🔍 Query Analysis"}))
         yield messages
 
         # Step 5: Execution loop (similar to your step-by-step solver)
        while step_count < self.max_steps and (time.time() - start_time) < self.max_time:
             step_count += 1
-            messages.append(ChatMessage(role="assistant", content=f"Generating next step...", metadata={"title": f"🔄 Step {step_count}"}))
+            # messages.append(ChatMessage(role="assistant",
+            #                             content=f"Generating next step...",
+            #                             metadata={"title": f"🔄 Step {step_count}"}))
             yield messages
 
             # Generate the next step
@@ -130,13 +133,16 @@ class Solver:
             # Display the step information
             messages.append(ChatMessage(
                 role="assistant",
-                content=f"- Context: {context}\n- Sub-goal: {sub_goal}\n- Tool: {tool_name}"
+                content=f"- Context: {context}\n- Sub-goal: {sub_goal}\n- Tool: {tool_name}",
+                metadata={"title": f"🎯 Step {step_count}: {tool_name}"}
             ))
             yield messages
 
             # Handle tool execution or errors
             if tool_name not in self.planner.available_tools:
-                messages.append(ChatMessage(role="assistant", content=f"⚠️ Error: Tool '{tool_name}' is not available."))
+                messages.append(ChatMessage(
+                    role="assistant",
+                    content=f"⚠️ Error: Tool '{tool_name}' is not available."))
                 yield messages
                 continue
 
@@ -148,7 +154,10 @@ class Solver:
             result = self.executor.execute_tool_command(tool_name, command)
             result = make_json_serializable(result)
 
-            messages.append(ChatMessage(role="assistant", content=f"{json.dumps(result, indent=4)}", metadata={"title": f"✅ Step {step_count} Result: {tool_name}"}))
+            messages.append(ChatMessage(
+                role="assistant",
+                content=f"{json.dumps(result, indent=4)}",
+                metadata={"title": f"✅ Step {step_count} Result: {tool_name}"}))
             yield messages
 
             # Step 6: Memory update and stopping condition
@@ -156,7 +165,9 @@ class Solver:
             stop_verification = self.planner.verificate_memory(user_query, img_path, query_analysis, self.memory)
             conclusion = self.planner.extract_conclusion(stop_verification)
 
-            messages.append(ChatMessage(role="assistant", content=f"🛑 Step {step_count} Conclusion: {conclusion}"))
+            messages.append(ChatMessage(
+                role="assistant",
+                content=f"🛑 Step {step_count} Conclusion: {conclusion}"))
             yield messages
 
             if conclusion == 'STOP':
@@ -254,7 +265,7 @@ def solve_problem_gradio(user_query, user_image, max_steps=10, max_time=60, api_
 
     messages = []  # Initialize message list
     for message_batch in solver.stream_solve_user_problem(user_query, user_image, api_key, messages):
-        yield [msg for msg in message_batch]
+        yield [msg for msg in message_batch]  # Ensure correct format for Gradio Chatbot
 
 
 
@@ -270,8 +281,8 @@ def main(args):
             max_steps = gr.Slider(value=5, minimum=1, maximum=10, step=1)
             max_time = gr.Slider(value=180, minimum=60, maximum=300, step=20)
         with gr.Column(scale=3):
-            chatbot_output = gr.Chatbot(label="Problem-Solving Output")
-            chatbot_output.like(lambda x: print(f"User liked: {x}"))
+            chatbot_output = gr.Chatbot(type="messages", label="Problem-Solving Output")
+            # chatbot_output.like(lambda x: print(f"User liked: {x}"))
     with gr.Row():
         with gr.Column(scale=8):
             user_query = gr.Textbox(show_label=False, placeholder="Type your question here...", container=False)
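Net effect of the commit: the hand-rolled ChatMessage class is commented out in favor of Gradio's built-in gradio.ChatMessage, a leftover pdb.set_trace() breakpoint is deleted, and the Chatbot is created with type="messages" so the streamed ChatMessage objects, including their metadata titles, render natively as collapsible tool/thought sections. Below is a minimal runnable sketch of that streaming pattern, assuming a Gradio version where gr.Chatbot(type="messages") and gradio.ChatMessage are available (4.44+); stream_demo and its message strings are illustrative stand-ins, not code from this Space.

import time

import gradio as gr
from gradio import ChatMessage


def stream_demo(query):
    # Generator that yields the growing message list; Gradio re-renders
    # the Chatbot on every yield, which is exactly how app.py streams
    # its solver steps.
    messages = []

    # A plain message (no metadata) renders as a normal chat bubble.
    messages.append(ChatMessage(role="assistant", content=f"Received Query: {query}"))
    yield messages

    # With metadata={"title": ...}, a type="messages" Chatbot renders the
    # message as a collapsible titled section, like the per-step output.
    messages.append(ChatMessage(role="assistant",
                                content="Analyzing the query...",
                                metadata={"title": "🔍 Query Analysis"}))
    yield messages

    time.sleep(1)  # stand-in for the real planner/executor work
    messages.append(ChatMessage(role="assistant", content="Done."))
    yield messages


with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages", label="Problem-Solving Output")
    box = gr.Textbox(show_label=False, placeholder="Type your question here...")
    box.submit(stream_demo, inputs=box, outputs=chatbot)

if __name__ == "__main__":
    demo.launch()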