Update app.py
app.py
CHANGED
@@ -13,76 +13,106 @@ logging.basicConfig(
 )
 logger = logging.getLogger(__name__)

+# Set environment variables for memory optimization
+os.environ['TRANSFORMERS_CACHE'] = '/home/user/.cache/huggingface/hub'  # HF Spaces cache directory
+os.environ['TOKENIZERS_PARALLELISM'] = 'false'
+
 class HealthAssistant:
     def __init__(self):
-        self.model_id = "microsoft/Phi-
+        self.model_id = "microsoft/Phi-2"  # Using smaller Phi-2 model instead of Phi-3
         self.model = None
         self.tokenizer = None
         self.pipe = None
         self.metrics = []
         self.medications = []
-        self.device = "cpu"
-        self.
+        self.device = "cpu"
+        self.is_model_loaded = False
+        self.max_history_length = 2

     def initialize_model(self):
         try:
+            if self.is_model_loaded:
+                return True
+
             logger.info(f"Loading model: {self.model_id}")

-            # Initialize tokenizer
+            # Initialize tokenizer with optimizations
             self.tokenizer = AutoTokenizer.from_pretrained(
                 self.model_id,
-                trust_remote_code=True
+                trust_remote_code=True,
+                model_max_length=256,
+                padding_side="left"
             )
             logger.info("Tokenizer loaded")

-            #
+            # Load model with memory optimizations
             self.model = AutoModelForCausalLM.from_pretrained(
                 self.model_id,
-                torch_dtype=torch.float32,
+                torch_dtype=torch.float32,
                 trust_remote_code=True,
-                device_map=None
+                device_map=None,
+                low_cpu_mem_usage=True
             ).to(self.device)
-            logger.info("Model loaded on CPU")

+            gc.collect()
+
             # Setup pipeline
             self.pipe = pipeline(
                 "text-generation",
                 model=self.model,
                 tokenizer=self.tokenizer,
-                device=self.device
+                device=self.device,
+                model_kwargs={"low_cpu_mem_usage": True}
             )
-            logger.info("Pipeline created successfully")

+            self.is_model_loaded = True
+            logger.info("Model initialized successfully")
             return True

         except Exception as e:
             logger.error(f"Error in model initialization: {str(e)}")
             raise

+    def unload_model(self):
+        """Unload model to free up memory"""
+        if hasattr(self, 'model') and self.model is not None:
+            del self.model
+            self.model = None
+        if hasattr(self, 'pipe') and self.pipe is not None:
+            del self.pipe
+            self.pipe = None
+        if hasattr(self, 'tokenizer') and self.tokenizer is not None:
+            del self.tokenizer
+            self.tokenizer = None
+        self.is_model_loaded = False
+        gc.collect()
+        logger.info("Model unloaded successfully")
+
     def generate_response(self, message: str, history: List = None) -> str:
         try:
-
-
+            if not self.is_model_loaded:
+                self.initialize_model()
+
+            # Limit message length
+            message = message[:200]  # Truncate long messages
+
+            prompt = self._prepare_prompt(message, history[-self.max_history_length:] if history else None)

-            # Generation configuration
             generation_args = {
-                "max_new_tokens":
+                "max_new_tokens": 200,
                 "return_full_text": False,
                 "temperature": 0.7,
                 "do_sample": True,
                 "top_k": 50,
                 "top_p": 0.9,
-                "repetition_penalty": 1.1
+                "repetition_penalty": 1.1,
+                "num_return_sequences": 1,
+                "batch_size": 1
             }

-
-            output = self.pipe(
-                prompt,
-                **generation_args
-            )
+            output = self.pipe(prompt, **generation_args)
             response = output[0]['generated_text']

-            # Basic cleanup
             gc.collect()

             return response.strip()
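Review note: the is_model_loaded guard plus the new unload_model() turn the Phi-2 pipeline into a load-on-demand cache: initialize_model() becomes a no-op once loaded, and the weights can be dropped between requests. A minimal sketch of that pattern in isolation; LazyResource and the list factory are illustrative stand-ins, not code from this commit.

import gc

class LazyResource:
    """Build an expensive object on first use; drop it on demand."""

    def __init__(self, factory):
        self._factory = factory    # callable that builds the expensive object
        self._obj = None
        self.is_loaded = False

    def get(self):
        if not self.is_loaded:     # same guard as initialize_model()
            self._obj = self._factory()
            self.is_loaded = True
        return self._obj

    def unload(self):
        # Mirrors unload_model(): drop the reference, reset the flag,
        # and force a collection pass so memory is returned promptly.
        self._obj = None
        self.is_loaded = False
        gc.collect()

# The factory stands in for AutoModelForCausalLM.from_pretrained(...).
resource = LazyResource(lambda: list(range(1_000_000)))
resource.get()       # first call builds the object
resource.unload()    # free memory between requests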
@@ -90,22 +120,22 @@
         except Exception as e:
             logger.error(f"Error generating response: {str(e)}")
             return "I apologize, but I encountered an error. Please try again."
+        finally:
+            # Attempt to free memory after each generation
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()

     def _prepare_prompt(self, message: str, history: List = None) -> str:
-        """Prepare prompt with context and history"""
         prompt_parts = [
-            "
-
-            "\nCurrent Health Information:",
-            self._get_health_context(),
-            "\nConversation:"
+            "Medical AI assistant. Be professional, include disclaimers.",
+            self._get_health_context()
         ]

         if history:
-            for prev_msg, prev_response in history
+            for prev_msg, prev_response in history:
             prompt_parts.extend([
-                f"Human: {prev_msg}",
-                f"Assistant: {prev_response}"
+                f"Human: {prev_msg[:100]}",  # Truncate history messages
+                f"Assistant: {prev_response[:100]}"
             ])

         prompt_parts.extend([
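Review note: generate_response() now truncates the user message to 200 characters and each history turn to 100, so the prompt is bounded by max_history_length instead of growing with the conversation. A standalone sketch of the resulting prompt shape; the trailing extend([...]) falls outside this hunk, so the closing Human/Assistant lines are an assumption, and prepare_prompt/context are illustrative names.

def prepare_prompt(message, history=None, context="No health data"):
    parts = ["Medical AI assistant. Be professional, include disclaimers.", context]
    for prev_msg, prev_response in (history or []):
        # Truncate history turns exactly as _prepare_prompt() does.
        parts.extend([f"Human: {prev_msg[:100]}", f"Assistant: {prev_response[:100]}"])
    # Assumed closing lines: the current turn plus the generation cue.
    parts.extend([f"Human: {message}", "Assistant:"])
    return "\n".join(parts)

print(prepare_prompt("Is 7 hours of sleep enough?", [("Hi", "Hello! How can I help?")]))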
@@ -116,29 +146,26 @@
         return "\n".join(prompt_parts)

     def _get_health_context(self) -> str:
-
-
+        if not self.metrics and not self.medications:
+            return "No health data"
+
+        context = []
         if self.metrics:
             latest = self.metrics[-1]
-
-
-                f"- Weight: {latest.get('Weight', 'N/A')} kg",
-                f"- Steps: {latest.get('Steps', 'N/A')}",
-                f"- Sleep: {latest.get('Sleep', 'N/A')} hours"
-            ])
-
+            context.append(f"Metrics: W:{latest['Weight']}kg S:{latest['Steps']} Sl:{latest['Sleep']}h")
+
         if self.medications:
-
-
-
-
-                med_info += f" | Note: {med['Notes']}"
-            context_parts.append(med_info)
-
-        return "\n".join(context_parts) if context_parts else "No health data recorded"
+            meds = [f"{m['Medication']}({m['Dosage']}@{m['Time']})" for m in self.medications[-2:]]
+            context.append("Meds: " + ", ".join(meds))
+
+        return " | ".join(context)

     def add_metrics(self, weight: float, steps: int, sleep: float) -> bool:
         try:
+            # Keep only last 5 metrics
+            if len(self.metrics) >= 5:
+                self.metrics.pop(0)
+
             self.metrics.append({
                 'Weight': weight,
                 'Steps': steps,
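Review note: the compact context string keeps every prompt cheap, and the five-entry cap bounds memory. The manual len()/pop(0) bookkeeping in add_metrics() and add_medication() is equivalent to a collections.deque with maxlen; a sketch of that alternative (not what the commit uses):

from collections import deque

# deque(maxlen=5) drops the oldest entry automatically on append,
# matching the len() >= 5 check plus pop(0) above.
metrics = deque(maxlen=5)
for day in range(8):
    metrics.append({'Weight': 70.0, 'Steps': 8000 + day, 'Sleep': 7.5})

print(len(metrics))         # 5 -- the three oldest entries were dropped
print(metrics[0]['Steps'])  # 8003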
@@ -151,6 +178,10 @@

     def add_medication(self, name: str, dosage: str, time: str, notes: str = "") -> bool:
         try:
+            # Keep only last 5 medications
+            if len(self.medications) >= 5:
+                self.medications.pop(0)
+
             self.medications.append({
                 'Medication': name,
                 'Dosage': dosage,
@@ -176,9 +207,18 @@ class GradioInterface:
         if not message.strip():
             return "", history

-
-
-
+        try:
+            response = self.assistant.generate_response(message, history)
+            history.append([message, response])
+
+            # Unload model periodically
+            if len(history) % 3 == 0:
+                self.assistant.unload_model()
+
+            return "", history
+        except Exception as e:
+            logger.error(f"Error in chat response: {e}")
+            return "", history + [[message, "I apologize, but I encountered an error. Please try again."]]

     def add_health_metrics(self, weight: float, steps: int, sleep: float) -> str:
         if not all([weight is not None, steps is not None, sleep is not None]):
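Review note: chat_response() now owns error handling and unloads the model every third turn, trading reload latency on the following turn for a smaller resident footprint between turns. A stand-in sketch of the handler contract Gradio expects from msg.submit (StubAssistant is hypothetical):

class StubAssistant:
    """Hypothetical stand-in for HealthAssistant."""
    def generate_response(self, message, history):
        return f"(echo) {message}"

    def unload_model(self):
        print("model unloaded")

def chat_response(message, history, assistant=StubAssistant(), unload_every=3):
    # Contract: take (textbox value, chat history), return (cleared textbox, new history).
    if not message.strip():
        return "", history
    history = history + [[message, assistant.generate_response(message, history)]]
    if len(history) % unload_every == 0:
        assistant.unload_model()  # trade latency on the next turn for memory now
    return "", history

_, hist = chat_response("hello", [])
print(hist)  # [['hello', '(echo) hello']]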
@@ -210,9 +250,7 @@
         with gr.Blocks(title="Medical Health Assistant") as demo:
             gr.Markdown("""
             # 🏥 Medical Health Assistant
-
-            This AI assistant provides medical information and health guidance.
-            **Note**: This is not a replacement for professional medical advice.
+            This AI assistant provides general health information and guidance.
             """)

             with gr.Tabs():
@@ -220,13 +258,13 @@
             with gr.Tab("💬 Medical Consultation"):
                 chatbot = gr.Chatbot(
                     value=[],
-                    height=
+                    height=400,
                     show_label=False
                 )
                 with gr.Row():
                     msg = gr.Textbox(
-                        placeholder="
-                        lines=
+                        placeholder="Ask your health question...",
+                        lines=1,
                         show_label=False,
                         scale=9
                     )
@@ -235,50 +273,48 @@

             # Health Metrics
             with gr.Tab("📊 Health Metrics"):
+                gr.Markdown("### Track Your Health Metrics")
                 with gr.Row():
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    metrics_btn = gr.Button("Save Metrics")
-                    metrics_status = gr.Markdown()
+                    weight_input = gr.Number(
+                        label="Weight (kg)",
+                        minimum=0,
+                        maximum=500
+                    )
+                    steps_input = gr.Number(
+                        label="Steps",
+                        minimum=0,
+                        maximum=100000
+                    )
+                    sleep_input = gr.Number(
+                        label="Hours Slept",
+                        minimum=0,
+                        maximum=24
+                    )
+                metrics_btn = gr.Button("Save Metrics")
+                metrics_status = gr.Markdown()

             # Medication Manager
             with gr.Tab("💊 Medication Manager"):
+                gr.Markdown("### Track Your Medications")
+                med_name = gr.Textbox(
+                    label="Medication Name",
+                    placeholder="Enter medication name"
+                )
                 with gr.Row():
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    med_notes = gr.Textbox(
-                        label="Notes (optional)",
-                        placeholder="Additional instructions or notes"
-                    )
-                    med_btn = gr.Button("Add Medication")
-                    med_status = gr.Markdown()
+                    med_dosage = gr.Textbox(
+                        label="Dosage",
+                        placeholder="e.g., 500mg"
+                    )
+                    med_time = gr.Textbox(
+                        label="Time",
+                        placeholder="e.g., 9:00 AM"
+                    )
+                med_notes = gr.Textbox(
+                    label="Notes (optional)",
+                    placeholder="Additional instructions or notes"
+                )
+                med_btn = gr.Button("Add Medication")
+                med_status = gr.Markdown()

             # Event handlers
             msg.submit(self.chat_response, [msg, chatbot], [msg, chatbot])
@@ -298,27 +334,22 @@
             )

             gr.Markdown("""
-            ### ⚠️
-            This AI assistant provides general health information only.
-
-            - Always consult healthcare professionals for medical decisions
-            - Seek immediate medical attention for emergencies
+            ### ⚠️ Medical Disclaimer
+            This AI assistant provides general health information only. Not a replacement for professional medical advice.
+            Always consult healthcare professionals for medical decisions.
             """)

-        demo.queue()
+        demo.queue(concurrency_count=1, max_size=5)

         return demo

 def main():
     try:
-        logger.info("Starting Medical Health Assistant...")
         interface = GradioInterface()
         demo = interface.create_interface()
-        logger.info("Launching interface...")
         demo.launch(
-
-
-            share=False
+            show_error=True,
+            share=True
         )
     except Exception as e:
         logger.error(f"Error starting application: {e}")
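Review note: demo.queue(concurrency_count=1, max_size=5) is the Gradio 3.x signature; Gradio 4.x removed concurrency_count from queue(). share=True is also normally ignored on Spaces, which already serve the app publicly. A version-tolerant sketch under those assumptions, with a placeholder handler so it runs standalone:

import gradio as gr

def echo(text):
    # Placeholder handler; the real app wires chat_response here.
    return text

with gr.Blocks() as demo:
    box = gr.Textbox()
    out = gr.Textbox()
    box.submit(echo, box, out)

# Probe for the older signature, then fall back to the Gradio 4.x one;
# max_size=5 bounds the request queue either way.
try:
    demo.queue(concurrency_count=1, max_size=5)  # Gradio 3.x
except TypeError:
    demo.queue(max_size=5)                       # Gradio 4.x

if __name__ == "__main__":
    demo.launch(show_error=True)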