Update app.py
app.py CHANGED
@@ -357,7 +357,7 @@ class ChatBot:
         self.history = []
 
     def predict(self, user_input, system_prompt="You are an expert medical analyst:"):
-        formatted_input = f"
+        formatted_input = f"{system_prompt}{user_input}"
         user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt")
         response = peft_model.generate(input_ids=user_input_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)
         response_text = tokenizer.decode(response[0], skip_special_tokens=True)
@@ -409,21 +409,37 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
         print("Summary:", summary) # Debug print
         print("Sources Info:", sources_info) # Debug print
 
-        #
+        # Format Vectara response in Markdown
+        markdown_output = "### Vectara Response Summary\n"
+        markdown_output += f"* **Summary**: {summary}\n"
+        markdown_output += "### Sources Information\n"
+        for source in sources_info:
+            markdown_output += f"* {source}\n"
+
+        # Append the original image description in Markdown
         if image_description:
-
-
-
+            markdown_output += "\n### Original Image Description\n"
+            markdown_output += image_description + "\n"
+
+        # Process the summary with Stablemed
         final_response = process_summary_with_stablemed(summary)
         print("Final Response:", final_response) # Debug print
 
         # Evaluate hallucination
         hallucination_label = evaluate_hallucination(final_response, summary)
         print("Hallucination Label:", hallucination_label) # Debug print
-
+
+        # Add final response and hallucination label to Markdown output
+        markdown_output += "\n### Processed Summary with StableMed\n"
+        markdown_output += final_response + "\n"
+        markdown_output += "\n### Hallucination Evaluation\n"
+        markdown_output += f"* **Label**: {hallucination_label}\n"
+
+        return markdown_output
     except Exception as e:
         print(f"An error occurred: {e}")
-        return "Error occurred during processing."
+        return "Error occurred during processing."
+
 
 
 welcome_message = """
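For context, here is a minimal, self-contained sketch of the `predict()` flow after this change. The stand-in base model (`gpt2`) and the use of instance attributes are assumptions for illustration only; in app.py the real `tokenizer` and `peft_model` are created elsewhere in the file and are not shown in this diff.

```python
# Hypothetical, self-contained sketch of the updated predict() flow.
# "gpt2" is a stand-in; app.py uses its own tokenizer and peft_model.
from transformers import AutoModelForCausalLM, AutoTokenizer


class ChatBot:
    def __init__(self, model_name="gpt2"):
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(model_name)
        self.history = []

    def predict(self, user_input, system_prompt="You are an expert medical analyst:"):
        # As in the diff above: prepend the system prompt to the user input.
        formatted_input = f"{system_prompt}{user_input}"
        input_ids = self.tokenizer.encode(formatted_input, return_tensors="pt")
        output = self.model.generate(
            input_ids=input_ids,
            max_length=512,
            pad_token_id=self.tokenizer.eos_token_id,
        )
        return self.tokenizer.decode(output[0], skip_special_tokens=True)
```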
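The bulk of the additions to `process_and_query()` concatenate one Markdown string section by section. The sketch below pulls that logic into a standalone helper so it can be read and run in isolation; the helper name and the dummy inputs are hypothetical, and in app.py the same statements are inlined in the function body.

```python
# Hypothetical helper mirroring the Markdown assembly added in this commit.
def build_markdown_report(summary, sources_info, image_description,
                          final_response, hallucination_label):
    markdown_output = "### Vectara Response Summary\n"
    markdown_output += f"* **Summary**: {summary}\n"
    markdown_output += "### Sources Information\n"
    for source in sources_info:
        markdown_output += f"* {source}\n"
    # Append the original image description, if any.
    if image_description:
        markdown_output += "\n### Original Image Description\n"
        markdown_output += image_description + "\n"
    # Add the processed summary and the hallucination label.
    markdown_output += "\n### Processed Summary with StableMed\n"
    markdown_output += final_response + "\n"
    markdown_output += "\n### Hallucination Evaluation\n"
    markdown_output += f"* **Label**: {hallucination_label}\n"
    return markdown_output


# Dummy values, for illustration only.
print(build_markdown_report(
    summary="Short Vectara summary of the query.",
    sources_info=["source 1", "source 2"],
    image_description="Description produced from the uploaded image.",
    final_response="StableMed's processed summary.",
    hallucination_label="factual",
))
```

In the app itself, this assembled string is what `process_and_query()` now returns on success, while the plain error message is returned from the `except` branch.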