Update app.py
app.py CHANGED
@@ -129,7 +129,8 @@ def gradio_interface(patient_info, image):
             "error": "Model initialization failed. Please check the logs for details.",
             "status": "error"
         }, indent=2)
-
+
+    classes = ["Alzheimer's", "Stroke", "Tumor", "Normal"]
     # Process image if provided
     image_analysis = None
     if image is not None:
@@ -144,15 +145,15 @@ def gradio_interface(patient_info, image):
         logger.info(f"Prediction: {prediction}")
         # Format prediction results
         image_analysis = {
-            "prediction":
+            "prediction": classes[int(prediction[0][0])],
             "confidence": float(prediction[0][0]) * 100
         }
         logger.info(f"Image analysis results: {image_analysis}")
-
+
+    patient_info += f"Prediction based on MRI images: {image_analysis['prediction']}, Confidence: {image_analysis['confidence']}"
     # Create chat session and submit query
     session_id = create_chat_session()
-    llm_response = submit_query(session_id, patient_info,
-                                json.dumps(image_analysis) if image_analysis else None)
+    llm_response = submit_query(session_id, patient_info)
 
     if not llm_response or 'data' not in llm_response or 'answer' not in llm_response['data']:
         raise ValueError("Invalid response structure from LLM")
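For reference, a minimal, self-contained sketch of the flow this commit sets up: map the model output to a label from classes, fold the result into patient_info, and send a single text query to the LLM. It assumes a Keras-style predict() that returns a (1, num_classes) probability row and picks the class by argmax, whereas the committed code indexes prediction[0][0] directly; fake_predict and format_image_analysis are hypothetical stand-ins, not functions from this Space.

# Minimal sketch; fake_predict and format_image_analysis are hypothetical
# helpers, and the argmax-based mapping is an assumption (the commit reads
# prediction[0][0] directly).
import json

import numpy as np

classes = ["Alzheimer's", "Stroke", "Tumor", "Normal"]

def fake_predict(image):
    # Stand-in for model.predict(image): one row of class probabilities.
    return np.array([[0.05, 0.10, 0.80, 0.05]])

def format_image_analysis(prediction):
    # Pick the most probable class and report its probability as a percentage.
    idx = int(np.argmax(prediction[0]))
    return {
        "prediction": classes[idx],
        "confidence": float(prediction[0][idx]) * 100,
    }

if __name__ == "__main__":
    image_analysis = format_image_analysis(fake_predict(image=None))
    print(json.dumps(image_analysis, indent=2))

    # As in the commit: fold the image findings into the text prompt so a
    # single string can be passed to submit_query(session_id, patient_info).
    patient_info = "(patient description entered in the Gradio form) "
    patient_info += (
        f"Prediction based on MRI images: {image_analysis['prediction']}, "
        f"Confidence: {image_analysis['confidence']:.1f}%"
    )
    print(patient_info)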