lukiod committed
Commit 731f155 · verified · 1 Parent(s): f0cbaa0

Update app.py

Files changed (1)
  1. app.py +229 -62
app.py CHANGED
@@ -13,13 +13,10 @@ logging.basicConfig(
 )
 logger = logging.getLogger(__name__)
 
-# Force CPU usage and set memory optimizations
-torch.set_num_threads(4)
-
 class HealthAssistant:
     def __init__(self, use_smaller_model=True):
         if use_smaller_model:
-            self.model_name = "facebook/opt-125m"
+            self.model_name = "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext"
         else:
             self.model_name = "Qwen/Qwen2-VL-7B-Instruct"
 
@@ -46,11 +43,10 @@ class HealthAssistant:
                 trust_remote_code=True
             )
 
-            self.model = self.model.to("cpu")
-
             if self.tokenizer.pad_token is None:
                 self.tokenizer.pad_token = self.tokenizer.eos_token
 
+            self.model = self.model.to("cpu")
             logger.info("Model loaded successfully")
             return True
 
@@ -58,19 +54,116 @@ class HealthAssistant:
             logger.error(f"Error in model initialization: {str(e)}")
             raise
 
-    def is_initialized(self):
-        return (self.model is not None and
-                self.tokenizer is not None and
-                hasattr(self.model, 'generate'))
+    def _detect_query_type(self, message: str) -> str:
+        """Detect type of medical query"""
+        message_lower = message.lower()
+
+        emergency_keywords = ["emergency", "severe pain", "chest pain", "can't breathe",
+                              "unconscious", "stroke", "heart attack"]
+        if any(keyword in message_lower for keyword in emergency_keywords):
+            return "emergency_guidance"
+
+        symptom_keywords = ["symptom", "feel", "pain", "ache", "suffering", "experiencing"]
+        if any(keyword in message_lower for keyword in symptom_keywords):
+            return "symptom_check"
+
+        medication_keywords = ["medicine", "drug", "pill", "prescription", "medication", "dose"]
+        if any(keyword in message_lower for keyword in medication_keywords):
+            return "medication_info"
+
+        lifestyle_keywords = ["exercise", "diet", "sleep", "stress", "healthy", "lifestyle"]
+        if any(keyword in message_lower for keyword in lifestyle_keywords):
+            return "lifestyle_advice"
+
+        return "general"
+
+    def _get_specialized_prompt(self, query_type: str, message: str) -> str:
+        """Get specialized medical prompts"""
+        base_context = f"""Current Health Context:
+{self._get_health_context()}
+
+"""
+
+        prompts = {
+            "symptom_check": f"""{base_context}
+Analyze these symptoms professionally:
+Patient's Description: {message}
+
+Provide structured analysis:
+1. Symptoms identified
+2. Possible causes
+3. Severity assessment
+4. Recommended actions
+5. When to seek medical care
+""",
+
+            "medication_info": f"""{base_context}
+Regarding medication inquiry:
+Query: {message}
+
+Provide structured information:
+1. General medication information
+2. Common usage guidelines
+3. Important considerations
+4. General precautions
+5. When to consult healthcare provider
+""",
+
+            "lifestyle_advice": f"""{base_context}
+Health and Lifestyle Query:
+Question: {message}
+
+Provide structured guidance:
+1. Evidence-based recommendations
+2. Implementation steps
+3. Expected benefits
+4. Safety considerations
+5. Progress monitoring tips
+""",
+
+            "emergency_guidance": f"""{base_context}
+URGENT Health Situation:
+Condition: {message}
+
+Critical guidance:
+1. Severity assessment
+2. Immediate actions needed
+3. Emergency signs
+4. When to call emergency services
+5. Precautions while waiting
+⚠️ SEEK IMMEDIATE MEDICAL ATTENTION FOR EMERGENCIES
+""",
+
+            "general": f"""{base_context}
+Medical Query:
+{message}
+
+Provide structured response:
+1. Understanding of the query
+2. Relevant medical information
+3. Professional guidance
+4. Important considerations
+5. Additional recommendations
+"""
+        }
+
+        return prompts.get(query_type, prompts["general"])
 
     def generate_response(self, message: str, history: List = None) -> str:
         try:
-            if not self.is_initialized():
-                return "System is still initializing. Please try again in a moment."
+            if not hasattr(self, 'model') or self.model is None:
+                return "System is initializing. Please try again in a moment."
 
-            # Prepare prompt
-            prompt = self._prepare_prompt(message, history)
+            # Detect query type and get appropriate prompt
+            query_type = self._detect_query_type(message)
+            prompt = self._get_specialized_prompt(query_type, message)
+
+            # Add conversation history if available
+            if history:
+                prompt += "\n\nPrevious conversation context:"
+                for prev_msg, prev_response in history[-2:]:  # Last 2 exchanges
+                    prompt += f"\nQ: {prev_msg}\nA: {prev_response}\n"
+
             # Tokenize
             inputs = self.tokenizer(
                 prompt,
@@ -84,11 +177,11 @@ class HealthAssistant:
             with torch.no_grad():
                 outputs = self.model.generate(
                     inputs["input_ids"],
-                    max_new_tokens=128,
-                    num_beams=1,
-                    do_sample=True,
-                    temperature=0.7,
+                    max_new_tokens=150,
+                    num_beams=2,
+                    temperature=0.3,
                     top_p=0.9,
+                    no_repeat_ngram_size=3,
                     pad_token_id=self.tokenizer.pad_token_id,
                     eos_token_id=self.tokenizer.eos_token_id
                 )
@@ -99,6 +192,9 @@ class HealthAssistant:
                 skip_special_tokens=True
             )
 
+            # Clean and format response
+            response = self._format_response(response, query_type)
+
             # Cleanup
             del outputs, inputs
             gc.collect()
@@ -107,30 +203,45 @@ class HealthAssistant:
 
         except Exception as e:
             logger.error(f"Error generating response: {str(e)}")
-            return "I apologize, but I encountered an error. Please try again."
+            return "I apologize, but I encountered an error. Please try rephrasing your question."
 
-    def _prepare_prompt(self, message: str, history: List = None) -> str:
-        parts = [
-            "You are a helpful healthcare assistant providing accurate and helpful medical information.",
-            self._get_health_context() or "No health data available yet."
-        ]
+    def _format_response(self, response: str, query_type: str) -> str:
+        """Format and clean the response"""
+        # Remove repeated headers and clean up
+        lines = [line.strip() for line in response.split('\n') if line.strip()]
+        clean_lines = []
+        seen = set()
 
-        if history:
-            parts.append("Previous conversation:")
-            for h in history[-3:]:
-                parts.extend([
-                    f"User: {h[0]}",
-                    f"Assistant: {h[1]}"
-                ])
+        for line in lines:
+            # Skip common headers and duplicates
+            if any(header in line for header in ["Location:", "Date:", "M.D.", "Medical"]):
+                continue
+            if line not in seen:
+                seen.add(line)
+                clean_lines.append(line)
 
-        parts.extend([
-            f"User: {message}",
-            "Assistant:"
-        ])
+        # Add appropriate emoji based on query type
+        emoji_map = {
+            "emergency_guidance": "🚨",
+            "symptom_check": "🔍",
+            "medication_info": "💊",
+            "lifestyle_advice": "💡",
+            "general": "ℹ️"
+        }
 
-        return "\n\n".join(parts)
+        emoji = emoji_map.get(query_type, "ℹ️")
+
+        # Combine and format
+        formatted_response = f"{emoji} " + "\n".join(clean_lines)
+
+        # Add disclaimer if needed
+        if query_type in ["emergency_guidance", "medication_info"]:
+            formatted_response += "\n\n⚠️ This is general information only. Always consult healthcare professionals for medical advice."
+
+        return formatted_response
 
     def _get_health_context(self) -> str:
+        """Get user's health context"""
         context_parts = []
 
         if self.metrics:
@@ -150,8 +261,7 @@ class HealthAssistant:
                 med_info += f" | Note: {med['Notes']}"
                 context_parts.append(med_info)
 
-        return "\n".join(context_parts) if context_parts else ""
-
+        return "\n".join(context_parts) if context_parts else "No health data available"
     def add_metrics(self, weight: float, steps: int, sleep: float) -> bool:
         try:
             self.metrics.append({
@@ -182,8 +292,6 @@ class GradioInterface:
         try:
            logger.info("Initializing Health Assistant...")
            self.assistant = HealthAssistant(use_smaller_model=True)
-            if not self.assistant.is_initialized():
-                raise RuntimeError("Health Assistant failed to initialize properly")
            logger.info("Health Assistant initialized successfully")
        except Exception as e:
            logger.error(f"Failed to initialize Health Assistant: {e}")
@@ -193,16 +301,27 @@ class GradioInterface:
         if not message.strip():
             return "", history
 
+        # Generate response
         response = self.assistant.generate_response(message, history)
+
+        # Update history
         history.append([message, response])
+
+        # Clear input and return updated history
         return "", history
 
     def add_health_metrics(self, weight: float, steps: int, sleep: float) -> str:
         if not all([weight is not None, steps is not None, sleep is not None]):
             return "⚠️ Please fill in all metrics."
 
+        if weight <= 0 or steps < 0 or sleep < 0:
+            return "⚠️ Please enter valid positive numbers."
+
         if self.assistant.add_metrics(weight, steps, sleep):
-            return "✅ Health metrics saved successfully!"
+            return f"""✅ Health metrics saved successfully!
+            • Weight: {weight} kg
+            • Steps: {steps}
+            • Sleep: {sleep} hours"""
         return "❌ Error saving metrics."
 
     def add_medication_info(self, name: str, dosage: str, time: str, notes: str) -> str:
@@ -210,48 +329,86 @@ class GradioInterface:
             return "⚠️ Please fill in all required fields."
 
         if self.assistant.add_medication(name, dosage, time, notes):
-            return "✅ Medication added successfully!"
+            return f"""✅ Medication added successfully!
+            • Medication: {name}
+            • Dosage: {dosage}
+            • Time: {time}
+            • Notes: {notes if notes else 'None'}"""
         return "❌ Error adding medication."
 
     def create_interface(self):
-        with gr.Blocks(title="Health Assistant") as demo:
-            gr.Markdown("# 🏥 AI Health Assistant")
+        with gr.Blocks(title="Medical Health Assistant", theme=gr.themes.Soft()) as demo:
+            gr.Markdown("""
+            # 🏥 Medical Health Assistant
+
+            This AI assistant provides medical information and health guidance.
+            **Note**: This is not a replacement for professional medical advice.
+            """)
 
             with gr.Tabs():
                 # Chat Interface
-                with gr.Tab("💬 Health Chat"):
+                with gr.Tab("💬 Medical Consultation"):
                     chatbot = gr.Chatbot(
                         value=[],
-                        height=450
+                        height=450,
+                        bubble=True
                     )
                     with gr.Row():
                         msg = gr.Textbox(
-                            placeholder="Ask your health question... (Press Enter)",
+                            placeholder="Describe your medical concern... (Press Enter)",
                             lines=2,
                             show_label=False,
                             scale=9
                         )
-                        send_btn = gr.Button("Send", scale=1)
-                        clear_btn = gr.Button("Clear Chat")
+                        send_btn = gr.Button("💬 Send", scale=1)
+                        clear_btn = gr.Button("🔄 Clear Chat")
 
                 # Health Metrics
                 with gr.Tab("📊 Health Metrics"):
                     with gr.Row():
-                        weight_input = gr.Number(label="Weight (kg)")
-                        steps_input = gr.Number(label="Steps")
-                        sleep_input = gr.Number(label="Hours Slept")
-                        metrics_btn = gr.Button("Save Metrics")
-                        metrics_status = gr.Markdown()
+                        with gr.Column():
+                            gr.Markdown("### Enter Your Health Metrics")
+                            weight_input = gr.Number(
+                                label="Weight (kg)",
+                                minimum=0,
+                                maximum=500
+                            )
+                            steps_input = gr.Number(
+                                label="Steps",
+                                minimum=0,
+                                maximum=100000
+                            )
+                            sleep_input = gr.Number(
+                                label="Hours Slept",
+                                minimum=0,
+                                maximum=24
+                            )
+                            metrics_btn = gr.Button("📝 Save Metrics")
+                            metrics_status = gr.Markdown()
 
                 # Medication Manager
                 with gr.Tab("💊 Medication Manager"):
                     with gr.Row():
-                        med_name = gr.Textbox(label="Medication Name")
-                        med_dosage = gr.Textbox(label="Dosage")
-                        med_time = gr.Textbox(label="Time (e.g., 9:00 AM)")
-                        med_notes = gr.Textbox(label="Notes (optional)")
-                        med_btn = gr.Button("Add Medication")
-                        med_status = gr.Markdown()
+                        with gr.Column():
+                            gr.Markdown("### Add Medication Details")
+                            med_name = gr.Textbox(
+                                label="Medication Name",
+                                placeholder="Enter medication name"
+                            )
+                            med_dosage = gr.Textbox(
+                                label="Dosage",
+                                placeholder="e.g., 500mg"
+                            )
+                            med_time = gr.Textbox(
+                                label="Time",
+                                placeholder="e.g., 9:00 AM"
+                            )
+                            med_notes = gr.Textbox(
+                                label="Notes (optional)",
+                                placeholder="Additional instructions or notes"
+                            )
+                            med_btn = gr.Button("💊 Add Medication")
+                            med_status = gr.Markdown()
 
             # Event handlers
             msg.submit(self.chat_response, [msg, chatbot], [msg, chatbot])
@@ -270,16 +427,26 @@ class GradioInterface:
                 outputs=[med_status]
             )
 
+            # Add helpful information
+            gr.Markdown("""
+            ### ⚠️ Important Medical Disclaimer
+            This AI assistant provides general health information only.
+            - Not a replacement for professional medical advice
+            - Always consult healthcare professionals for medical decisions
+            - Seek immediate medical attention for emergencies
+            """)
+
+            # Enable queuing for better performance
             demo.queue()
 
         return demo
 
 def main():
     try:
-        logger.info("Starting application...")
+        logger.info("Starting Medical Health Assistant...")
         interface = GradioInterface()
         demo = interface.create_interface()
-        logger.info("Launching Gradio interface...")
+        logger.info("Launching interface...")
         demo.launch(
             server_name="0.0.0.0",
             server_port=7860,
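
For readers skimming the diff, here is a minimal standalone sketch of the keyword routing that the new `_detect_query_type` method introduces. This is a hypothetical reimplementation for illustration only, not code from this commit; the function name `detect_query_type` and the sample messages are assumptions. Categories are checked in order, so emergency phrasing takes priority over generic symptom wording.

```python
# Hypothetical, self-contained sketch mirroring the keyword routing added in
# HealthAssistant._detect_query_type; names and test messages are illustrative.

def detect_query_type(message: str) -> str:
    message_lower = message.lower()
    keyword_map = [
        ("emergency_guidance", ["emergency", "severe pain", "chest pain", "can't breathe",
                                "unconscious", "stroke", "heart attack"]),
        ("symptom_check", ["symptom", "feel", "pain", "ache", "suffering", "experiencing"]),
        ("medication_info", ["medicine", "drug", "pill", "prescription", "medication", "dose"]),
        ("lifestyle_advice", ["exercise", "diet", "sleep", "stress", "healthy", "lifestyle"]),
    ]
    # Categories are checked in order; the first substring match decides the prompt type.
    for query_type, keywords in keyword_map:
        if any(keyword in message_lower for keyword in keywords):
            return query_type
    return "general"

if __name__ == "__main__":
    assert detect_query_type("I have sudden chest pain") == "emergency_guidance"
    assert detect_query_type("What dose of ibuprofen is typical?") == "medication_info"
    assert detect_query_type("How can I sleep better?") == "lifestyle_advice"
    assert detect_query_type("Tell me about blood pressure readings") == "general"
```

Because matching is plain substring containment, broad words such as "feel" or "pain" route to symptom_check unless an emergency keyword appears first; unmatched messages fall through to the "general" prompt.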