Update app.py
app.py CHANGED
@@ -5,6 +5,7 @@ from qwen_vl_utils import process_vision_info
 import logging
 from typing import List, Dict
 import gc
+import os
 
 # Setup logging
 logging.basicConfig(level=logging.INFO)
@@ -23,39 +24,37 @@ class HealthAssistant:
     def initialize_model(self):
         try:
             logger.info("Loading Qwen2-VL model...")
+            # Initialize model with default settings
             self.model = Qwen2VLForConditionalGeneration.from_pretrained(
                 self.model_name,
-                torch_dtype=
-
-
-            )
-            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
-            self.processor = AutoProcessor.from_pretrained(
-                self.model_name,
-                min_pixels=256*28*28,
-                max_pixels=1280*28*28
+                torch_dtype="auto",
+                device_map="auto",
+                trust_remote_code=True
             )
+
+            # Initialize processor
+            self.processor = AutoProcessor.from_pretrained(self.model_name)
             logger.info("Model loaded successfully")
         except Exception as e:
-            logger.error(f"Error
+            logger.error(f"Error initializing model: {e}")
             raise
 
     def generate_response(self, message: str, history: List = None) -> str:
         try:
-            # Format
+            # Format messages for Qwen2-VL
             messages = self._format_messages(message, history)
 
-            # Prepare for inference
+            # Prepare for inference using qwen_vl_utils
             text = self.processor.apply_chat_template(
                 messages,
                 tokenize=False,
                 add_generation_prompt=True
             )
 
-            #
-            image_inputs, video_inputs =
+            # Process vision info (empty for text-only)
+            image_inputs, video_inputs = process_vision_info(messages)
 
-            #
+            # Prepare inputs
             inputs = self.processor(
                 text=[text],
                 images=image_inputs,
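Dropping the `AutoTokenizer` line loses nothing here: for Qwen2-VL, `AutoProcessor` bundles the tokenizer together with the image processor, so the text side stays reachable through the processor itself (a quick check, assuming the same `model_name`):

processor = AutoProcessor.from_pretrained(model_name)
processor.tokenizer  # same object AutoTokenizer.from_pretrained(model_name) would return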
@@ -63,21 +62,24 @@ class HealthAssistant:
                 padding=True,
                 return_tensors="pt"
             )
+
+            # Move to appropriate device
             inputs = inputs.to(self.model.device)
 
             # Generate response
             generated_ids = self.model.generate(
                 **inputs,
-                max_new_tokens=
+                max_new_tokens=128,
                 do_sample=True,
                 temperature=0.7,
                 top_p=0.9
             )
 
-            #
+            # Trim and decode response
            generated_ids_trimmed = [
                 out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
             ]
+
             output_text = self.processor.batch_decode(
                 generated_ids_trimmed,
                 skip_special_tokens=True,
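Taken together, the two hunks above assemble the standard Qwen2-VL generation round trip. A minimal runnable sketch of that path, following the Qwen2-VL model card; the checkpoint name and prompt are illustrative, and the message list is text-only:

from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info

# Illustrative checkpoint; the app reads self.model_name instead.
model_name = "Qwen/Qwen2-VL-2B-Instruct"
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_name, torch_dtype="auto", device_map="auto"
)
processor = AutoProcessor.from_pretrained(model_name)

messages = [{"role": "user", "content": [{"type": "text", "text": "Hello!"}]}]

# Render the chat template, then collect any image/video inputs
# (both come back empty for a text-only conversation).
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)

inputs = processor(
    text=[text], images=image_inputs, videos=video_inputs,
    padding=True, return_tensors="pt"
).to(model.device)

generated_ids = model.generate(**inputs, max_new_tokens=128)
# Strip the prompt tokens so only newly generated text is decoded.
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
print(processor.batch_decode(trimmed, skip_special_tokens=True)[0])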
@@ -96,24 +98,29 @@ class HealthAssistant:
             return "I apologize, but I encountered an error. Please try again."
 
     def _format_messages(self, message: str, history: List = None) -> List[Dict]:
-        """Format messages for
-        # Add system context
+        """Format messages for Qwen2-VL"""
         messages = []
-
-        # Add health context
+
+        # Add health context as system message
         health_context = self._get_health_context()
         if health_context:
             messages.append({
                 "role": "system",
-                "content": [{"type": "text", "text": f"
+                "content": [{"type": "text", "text": f"Health Context:\n{health_context}"}]
             })
 
         # Add conversation history
         if history:
-            for user_msg, assistant_msg in history[-3:]:
+            for user_msg, assistant_msg in history[-3:]:
                 messages.extend([
-                    {
-
+                    {
+                        "role": "user",
+                        "content": [{"type": "text", "text": user_msg}]
+                    },
+                    {
+                        "role": "assistant",
+                        "content": [{"type": "text", "text": assistant_msg}]
+                    }
                 ])
 
         # Add current message
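For reference, with one prior exchange in history the method now yields a list shaped like this (all values illustrative):

[
    {"role": "system",
     "content": [{"type": "text", "text": "Health Context:\n- Weight: 70 kg ..."}]},
    {"role": "user",
     "content": [{"type": "text", "text": "How did I sleep last week?"}]},
    {"role": "assistant",
     "content": [{"type": "text", "text": "You averaged 7.5 hours per night."}]},
    {"role": "user",
     "content": [{"type": "text", "text": "And my step count?"}]},
]

Note that `for user_msg, assistant_msg in history[-3:]` assumes Gradio's pair-style chat history (`[[user, assistant], ...]`); a `type="messages"` Chatbot would need different unpacking.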
@@ -125,7 +132,6 @@ class HealthAssistant:
         return messages
 
     def _get_health_context(self) -> str:
-        """Get health metrics and medications context"""
         context_parts = []
 
         if self.metrics:
@@ -185,7 +191,7 @@ class GradioInterface:
         return "", history
 
     def add_health_metrics(self, weight: float, steps: int, sleep: float) -> str:
-        if not all([weight, steps, sleep]):
+        if not all([weight is not None, steps is not None, sleep is not None]):
             return "⚠️ Please fill in all metrics."
 
         if self.assistant.add_metrics(weight, steps, sleep):
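The `is not None` rewrite fixes a real bug: `all()` treats numeric zero as falsy, so the old check rejected legitimate zero readings as missing input:

all([70.0, 0, 7.5])                          # False: 0 steps is falsy, entry wrongly rejected
all(v is not None for v in (70.0, 0, 7.5))   # True: only a missing (None) field fails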
@@ -202,19 +208,14 @@ class GradioInterface:
 
     def create_interface(self):
         with gr.Blocks(title="Health Assistant", theme=gr.themes.Soft()) as demo:
-            gr.Markdown(
-                """
-                # 🏥 AI Health Assistant
-                Powered by Qwen2-VL for intelligent health guidance and monitoring.
-                """
-            )
+            gr.Markdown("# 🏥 AI Health Assistant")
 
             with gr.Tabs():
                 # Chat Interface
                 with gr.Tab("💬 Health Chat"):
                     chatbot = gr.Chatbot(
                         height=450,
-                        show_label=False
+                        show_label=False,
                     )
                     with gr.Row():
                         msg = gr.Textbox(
@@ -262,14 +263,6 @@ class GradioInterface:
                 outputs=[med_status]
             )
 
-            gr.Markdown(
-                """
-                ### ⚠️ Important Note
-                This AI assistant provides general health information only.
-                Always consult healthcare professionals for medical advice.
-                """
-            )
-
         return demo
 
 def main():
@@ -278,8 +271,9 @@ def main():
         demo = interface.create_interface()
         demo.launch(
             share=False,
-
-
+            server_name="0.0.0.0",
+            server_port=7860,
+            enable_queue=True
         )
     except Exception as e:
         logger.error(f"Error starting application: {e}")
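One caveat on the new launch block: `enable_queue` is a legacy `launch()` kwarg that Gradio 4.x removed in favor of chaining `.queue()` on the Blocks object. If the Space runs a recent Gradio, the equivalent would be (a sketch, assuming Gradio >= 4):

demo.queue()  # request queueing is now configured on the Blocks object itself
demo.launch(share=False, server_name="0.0.0.0", server_port=7860)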