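"""Gradio app for a Phi-3-based medical health assistant.

Provides a chat interface plus simple trackers for health metrics and
medications; the stored entries are folded into the model's prompt as context.
"""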
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import logging
from typing import List
import gc
# Setup logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Set random seed for reproducibility
torch.random.manual_seed(0)
class HealthAssistant:
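    """Wraps the Phi-3 model plus in-memory logs of health metrics and medications."""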
def __init__(self):
self.model_id = "microsoft/Phi-3-small-128k-instruct"
self.model = None
self.tokenizer = None
self.pipe = None
self.metrics = []
self.medications = []
self.initialize_model()
def initialize_model(self):
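        """Load the tokenizer and model, move the model to GPU if available, and build the text-generation pipeline."""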
try:
logger.info(f"Loading model: {self.model_id}")
# Initialize tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
self.model_id,
trust_remote_code=True
)
logger.info("Tokenizer loaded")
# Initialize model
self.model = AutoModelForCausalLM.from_pretrained(
self.model_id,
torch_dtype="auto",
trust_remote_code=True
)
# Set device
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.model = self.model.to(self.device)
logger.info(f"Model loaded on {self.device}")
# Setup pipeline
self.pipe = pipeline(
"text-generation",
model=self.model,
tokenizer=self.tokenizer,
device=self.device
)
logger.info("Pipeline created successfully")
return True
except Exception as e:
logger.error(f"Error in model initialization: {str(e)}")
raise
def _prepare_prompt(self, message: str, history: List = None) -> str:
"""Prepare prompt with context and history"""
prompt_parts = [
"You are a medical AI assistant providing healthcare information and guidance.",
"Always be professional and include appropriate medical disclaimers.",
"\nCurrent Health Information:",
self._get_health_context(),
"\nConversation:"
]
if history:
for prev_msg, prev_response in history[-3:]:
prompt_parts.extend([
f"Human: {prev_msg}",
f"Assistant: {prev_response}"
])
prompt_parts.extend([
f"Human: {message}",
"Assistant:"
])
return "\n".join(prompt_parts)
def generate_response(self, message: str, history: List = None) -> str:
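        """Generate a reply to `message`, using recent chat history and stored health data as context."""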
try:
# Prepare prompt
prompt = self._prepare_prompt(message, history)
# Generation configuration
generation_args = {
"max_new_tokens": 500,
"return_full_text": False,
"temperature": 0.7,
"do_sample": True,
"top_k": 50,
"top_p": 0.9,
"repetition_penalty": 1.1
}
# Generate response
output = self.pipe(prompt, **generation_args)
response = output[0]['generated_text']
# Cleanup
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
return response.strip()
except Exception as e:
logger.error(f"Error generating response: {str(e)}")
return "I apologize, but I encountered an error. Please try again."
def _get_health_context(self) -> str:
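        """Summarize the latest metrics and current medications for inclusion in the prompt."""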
context_parts = []
if self.metrics:
latest = self.metrics[-1]
context_parts.extend([
"Recent Health Metrics:",
f"- Weight: {latest.get('Weight', 'N/A')} kg",
f"- Steps: {latest.get('Steps', 'N/A')}",
f"- Sleep: {latest.get('Sleep', 'N/A')} hours"
])
if self.medications:
context_parts.append("\nCurrent Medications:")
for med in self.medications:
med_info = f"- {med['Medication']} ({med['Dosage']}) at {med['Time']}"
if med.get('Notes'):
med_info += f" | Note: {med['Notes']}"
context_parts.append(med_info)
return "\n".join(context_parts) if context_parts else "No health data recorded"
def add_metrics(self, weight: float, steps: int, sleep: float) -> bool:
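        """Record one set of health metrics (weight in kg, step count, hours of sleep)."""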
try:
self.metrics.append({
'Weight': weight,
'Steps': steps,
'Sleep': sleep
})
return True
except Exception as e:
logger.error(f"Error adding metrics: {e}")
return False
def add_medication(self, name: str, dosage: str, time: str, notes: str = "") -> bool:
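        """Record a medication with its dosage, schedule, and optional notes."""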
try:
self.medications.append({
'Medication': name,
'Dosage': dosage,
'Time': time,
'Notes': notes
})
return True
except Exception as e:
logger.error(f"Error adding medication: {e}")
return False
class GradioInterface:
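    """Builds the Gradio UI and routes its events to a HealthAssistant instance."""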
def __init__(self):
try:
logger.info("Initializing Health Assistant...")
self.assistant = HealthAssistant()
logger.info("Health Assistant initialized successfully")
except Exception as e:
logger.error(f"Failed to initialize Health Assistant: {e}")
raise
def chat_response(self, message: str, history: List) -> tuple:
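        """Handle a chat message: append the (message, response) pair to history and clear the textbox."""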
if not message.strip():
return "", history
response = self.assistant.generate_response(message, history)
history.append([message, response])
return "", history
def add_health_metrics(self, weight: float, steps: int, sleep: float) -> str:
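        """Validate and store health metrics, returning a Markdown status message."""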
        if weight is None or steps is None or sleep is None:
            return "⚠️ Please fill in all metrics."
        if weight <= 0 or steps < 0 or sleep < 0:
            return "⚠️ Weight must be greater than 0; steps and sleep cannot be negative."
if self.assistant.add_metrics(weight, steps, sleep):
return f"""βœ… Health metrics saved successfully!
β€’ Weight: {weight} kg
β€’ Steps: {steps}
β€’ Sleep: {sleep} hours"""
return "❌ Error saving metrics."
def add_medication_info(self, name: str, dosage: str, time: str, notes: str) -> str:
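        """Validate and store a medication entry, returning a Markdown status message."""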
if not all([name, dosage, time]):
return "⚠️ Please fill in all required fields."
if self.assistant.add_medication(name, dosage, time, notes):
return f"""βœ… Medication added successfully!
β€’ Medication: {name}
β€’ Dosage: {dosage}
β€’ Time: {time}
β€’ Notes: {notes if notes else 'None'}"""
return "❌ Error adding medication."
def create_interface(self):
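        """Assemble the Blocks layout: chat, health metrics, and medication tabs, plus event wiring."""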
with gr.Blocks(title="Medical Health Assistant") as demo:
gr.Markdown("""
# πŸ₯ Medical Health Assistant
This AI assistant provides medical information and health guidance.
**Note**: This is not a replacement for professional medical advice.
""")
with gr.Tabs():
# Chat Interface
with gr.Tab("πŸ’¬ Medical Consultation"):
chatbot = gr.Chatbot(
value=[],
height=450,
show_label=False
)
with gr.Row():
msg = gr.Textbox(
placeholder="Describe your medical concern... (Press Enter)",
lines=2,
show_label=False,
scale=9
)
send_btn = gr.Button("Send", scale=1)
clear_btn = gr.Button("Clear Chat")
# Health Metrics
with gr.Tab("πŸ“Š Health Metrics"):
with gr.Row():
with gr.Column():
gr.Markdown("### Enter Your Health Metrics")
weight_input = gr.Number(
label="Weight (kg)",
minimum=0,
maximum=500
)
steps_input = gr.Number(
label="Steps",
minimum=0,
maximum=100000
)
sleep_input = gr.Number(
label="Hours Slept",
minimum=0,
maximum=24
)
metrics_btn = gr.Button("Save Metrics")
metrics_status = gr.Markdown()
# Medication Manager
with gr.Tab("πŸ’Š Medication Manager"):
with gr.Row():
with gr.Column():
gr.Markdown("### Add Medication Details")
med_name = gr.Textbox(
label="Medication Name",
placeholder="Enter medication name"
)
med_dosage = gr.Textbox(
label="Dosage",
placeholder="e.g., 500mg"
)
med_time = gr.Textbox(
label="Time",
placeholder="e.g., 9:00 AM"
)
med_notes = gr.Textbox(
label="Notes (optional)",
placeholder="Additional instructions or notes"
)
med_btn = gr.Button("Add Medication")
med_status = gr.Markdown()
# Event handlers
msg.submit(self.chat_response, [msg, chatbot], [msg, chatbot])
send_btn.click(self.chat_response, [msg, chatbot], [msg, chatbot])
clear_btn.click(lambda: [], None, chatbot)
metrics_btn.click(
self.add_health_metrics,
inputs=[weight_input, steps_input, sleep_input],
outputs=[metrics_status]
)
med_btn.click(
self.add_medication_info,
inputs=[med_name, med_dosage, med_time, med_notes],
outputs=[med_status]
)
gr.Markdown("""
### ⚠️ Important Medical Disclaimer
This AI assistant provides general health information only.
- Not a replacement for professional medical advice
- Always consult healthcare professionals for medical decisions
- Seek immediate medical attention for emergencies
""")
demo.queue()
return demo
def main():
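    """Build the interface and launch the Gradio server."""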
try:
logger.info("Starting Medical Health Assistant...")
interface = GradioInterface()
demo = interface.create_interface()
logger.info("Launching interface...")
demo.launch(
server_name="0.0.0.0",
server_port=7860,
share=False
)
except Exception as e:
logger.error(f"Error starting application: {e}")
raise
if __name__ == "__main__":
main()