Refactor / Tidy
app.py CHANGED
@@ -2,28 +2,15 @@
 
 from gradio_client import Client
 import numpy as np
-import base64
 import gradio as gr
-import tempfile
 import requests
 import json
 import dotenv
-from scipy.io.wavfile import write
 import soundfile as sf
-from openai import OpenAI
 import time
-import
+import textwrap
 from PIL import Image
-import
-import hashlib
-import datetime
-from utils import build_logger
-from transformers import AutoTokenizer, MistralForCausalLM
-import torch
-import random
-from textwrap import wrap
-import transformers
-from transformers import AutoConfig, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM, MistralForCausalLM
+from transformers import AutoTokenizer, AutoModelForCausalLM
 from peft import PeftModel, PeftConfig
 import torch
 import os
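The slimmed-down import list keeps `dotenv` and `os`, which the configuration block below this hunk relies on (`BASE_MODEL_ID`, `MODEL_DIRECTORY`). A minimal sketch of that pattern follows; the `HF_TOKEN` key name is an assumption, since the line that actually sets `hf_token` falls outside the hunks shown here.

```python
import os

import dotenv

# Pull variables from a local .env file into the process environment.
dotenv.load_dotenv()

# Hypothetical key name; app.py's actual source for hf_token is not shown in this diff.
hf_token = os.getenv("HF_TOKEN")

# These two names appear verbatim in the hunk header and context below.
base_model_id = os.getenv('BASE_MODEL_ID', 'default_base_model_id')
model_directory = os.getenv('MODEL_DIRECTORY', 'default_model_directory')
```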
@@ -40,28 +27,34 @@ base_model_id = os.getenv('BASE_MODEL_ID', 'default_base_model_id')
 model_directory = os.getenv('MODEL_DIRECTORY', 'default_model_directory')
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
+image_description = ""
+# audio_output = ""
+
 
-def check_hallucination(assertion,citation):
-
-
-    payload = {"inputs"
+def check_hallucination(assertion, citation):
+    api_url = "https://api-inference.huggingface.co/models/vectara/hallucination_evaluation_model"
+    header = {"Authorization": f"Bearer {hf_token}"}
+    payload = {"inputs": f"{assertion} [SEP] {citation}"}
 
-    response = requests.post(
+    response = requests.post(api_url, headers=header, json=payload, timeout=120)
     output = response.json()
     output = output[0][0]["score"]
 
     return f"**hallucination score:** {output}"
 
+
 # Define the API parameters
-
+vapi_url = "https://api-inference.huggingface.co/models/vectara/hallucination_evaluation_model"
+
+headers = {"Authorization": f"Bearer {hf_token}"}
 
-headers = {"Authorization": f"Bearer {HuggingFace_Token}"}
 
 # Function to query the API
 def query(payload):
-    response = requests.post(
+    response = requests.post(vapi_url, headers=headers, json=payload)
     return response.json()
 
+
 # Function to evaluate hallucination
 def evaluate_hallucination(input1, input2):
     # Combine the inputs
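For reference, the refactored `check_hallucination` / `query` pair above boils down to a single Inference API call. A standalone sketch of that call, using the same endpoint and payload shape as the added lines; the example strings and the token value are placeholders.

```python
import requests

API_URL = "https://api-inference.huggingface.co/models/vectara/hallucination_evaluation_model"


def hallucination_score(assertion: str, citation: str, hf_token: str) -> float:
    """Score how well `citation` supports `assertion` (same payload shape as app.py)."""
    headers = {"Authorization": f"Bearer {hf_token}"}
    payload = {"inputs": f"{assertion} [SEP] {citation}"}
    response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
    response.raise_for_status()
    output = response.json()
    # app.py indexes the response the same way: output[0][0]["score"]
    return output[0][0]["score"]


# Placeholder usage:
# score = hallucination_score("The patient is afebrile.", "Temperature recorded at 39.2 C.", hf_token="hf_...")
```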
@@ -81,6 +74,7 @@ def evaluate_hallucination(input1, input2):
 
     return label
 
+
 def save_audio(audio_input, output_dir="saved_audio"):
     if not os.path.exists(output_dir):
         os.makedirs(output_dir)
@@ -97,13 +91,14 @@ def save_audio(audio_input, output_dir="saved_audio"):
 
     return file_path
 
+
 def save_image(image_input, output_dir="saved_images"):
     if not os.path.exists(output_dir):
         os.makedirs(output_dir)
 
     # Assuming image_input is a NumPy array
     if isinstance(image_input, np.ndarray):
-        # Convert NumPy
+        # Convert NumPy arrays to PIL Image
         image = Image.fromarray(image_input)
 
         # Generate a unique file name
@@ -118,12 +113,11 @@ def save_image(image_input, output_dir="saved_images"):
         raise ValueError("Invalid image input type")
 
 
-
 def process_speech(input_language, audio_input):
     """
     processing sound using seamless_m4t
     """
-    if audio_input is None
+    if audio_input is None:
         return "no audio or audio did not save yet \nplease try again ! "
     print(f"audio : {audio_input}")
     print(f"audio type : {type(audio_input)}")
@@ -131,16 +125,16 @@ def process_speech(input_language, audio_input):
         "S2TT",
         "file",
         None,
-        audio_input,
+        audio_input,
         "",
-        input_language
-        "English"
+        input_language,
+        "English",
         api_name="/run",
     )
-    out = out[1]
-    try
+    out = out[1]  # get the text
+    try:
         return f"{out}"
-    except Exception as e
+    except Exception as e:
        return f"{e}"
 
 
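The positional arguments in this hunk ("S2TT", "file", None, the saved audio path, "", the source language, "English") follow the gradio_client calling convention for a SeamlessM4T Space; the `Client(...)` construction itself sits outside the hunk. A sketch under that assumption, with a placeholder Space id and file path:

```python
from gradio_client import Client

# Placeholder Space id; the endpoint app.py actually targets is not shown in this diff.
client = Client("facebook/seamless_m4t")

out = client.predict(
    "S2TT",         # task: speech-to-text translation
    "file",         # audio comes from a file rather than the microphone
    None,           # microphone input (unused)
    "audio1.wav",   # path to the saved recording (placeholder)
    "",             # text input (unused for S2TT)
    "English",      # source language (app.py passes input_language here)
    "English",      # target language
    api_name="/run",
)
transcript = out[1]  # the hunk above keeps element 1, the translated text
```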
@@ -165,7 +159,8 @@ def convert_text_to_speech(input_text, source_language, target_language):
     # Initialize variables
     original_audio_file = None
     translated_text = ""
-
+    new_file_path = ""
+
     # Check if result contains files
     if isinstance(result, list) and len(result) > 1:
         downloaded_files = []
@@ -197,7 +192,8 @@ def convert_text_to_speech(input_text, source_language, target_language):
         # Return a concise error message
         return f"Error in text-to-speech conversion: {str(e)}", ""
 
-    return "Unexpected result format or insufficient data received.", ""
+    # return "Unexpected result format or insufficient data received.", "" //UNREACHABLE CODE
+
 
 def process_image(image_input):
     # Initialize the Gradio client with the URL of the Gradio server
@@ -220,14 +216,14 @@ def query_vectara(text):
     user_message = text
 
     # Read authentication parameters from the .env file
-
-
-
+    customer_id = os.getenv('CUSTOMER_ID')
+    corpus_id = os.getenv('CORPUS_ID')
+    api_key = os.getenv('API_KEY')
 
     # Define the headers
     api_key_header = {
-        "customer-id":
-        "x-api-key":
+        "customer-id": customer_id,
+        "x-api-key": api_key
    }
 
     # Define the request body in the structure provided in the example
@@ -254,8 +250,8 @@ def query_vectara(text):
        },
        "corpusKey": [
            {
-                "customerId":
-                "corpusId":
+                "customerId": customer_id,
+                "corpusId": corpus_id,
                "semantics": 0,
                "metadataFilter": "",
                "lexicalInterpolationConfig": {
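The two hunks above move the Vectara credentials into environment variables and splice them into the headers and `corpusKey`. A sketch of the surrounding request under the same variable names; the endpoint URL and the outer `"query"` wrapper are assumptions, since those lines fall outside the hunks shown here.

```python
import os

import requests

customer_id = os.getenv('CUSTOMER_ID')
corpus_id = os.getenv('CORPUS_ID')
api_key = os.getenv('API_KEY')

headers = {
    "customer-id": customer_id,
    "x-api-key": api_key,
}

# Only corpusKey's fields are visible in the diff; the wrapper around it is an assumption.
body = {
    "query": [
        {
            "query": "example user question",
            "numResults": 10,
            "corpusKey": [
                {
                    "customerId": customer_id,
                    "corpusId": corpus_id,
                    "semantics": 0,
                    "metadataFilter": "",
                }
            ],
        }
    ]
}

# Assumed endpoint; the URL app.py actually posts to is not shown in this diff.
response = requests.post("https://api.vectara.io/v1/query", headers=headers, json=body, timeout=60)
print(response.json())
```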
@@ -327,6 +323,8 @@ def wrap_text(text, width=90):
     wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
     wrapped_text = '\n'.join(wrapped_lines)
     return wrapped_text
+
+
 def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:"):
 
     # Combine user input and system prompt
@@ -336,15 +334,15 @@ def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:"):
     encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
     model_inputs = encodeds.to(device)
 
-    # Generate a response using the model
-    output =
+    # Generate a response using the model //MODEL UNDEFINED, using peft_model instead.
+    output = peft_model.generate(
         **model_inputs,
-        max_length=
+        max_length=512,
         use_cache=True,
         early_stopping=True,
-        bos_token_id=
-        eos_token_id=
-        pad_token_id=
+        bos_token_id=peft_model.config.bos_token_id,
+        eos_token_id=peft_model.config.eos_token_id,
+        pad_token_id=peft_model.config.eos_token_id,
         temperature=0.1,
         do_sample=True
     )
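`peft_model` is referenced throughout the added lines but built outside these hunks (the diff's own comment notes that the bare `model` name was undefined). A sketch of how such a model is typically assembled from the `base_model_id` / `model_directory` values read at the top of the file; which checkpoint and adapter those point to is an assumption.

```python
import os

import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM

device = "cuda" if torch.cuda.is_available() else "cpu"

base_model_id = os.getenv('BASE_MODEL_ID', 'default_base_model_id')
model_directory = os.getenv('MODEL_DIRECTORY', 'default_model_directory')

# Load the base causal LM, then wrap it with the PEFT adapter saved in model_directory.
base_model = AutoModelForCausalLM.from_pretrained(base_model_id, torch_dtype=torch.float16)
peft_model = PeftModel.from_pretrained(base_model, model_directory).to(device)
peft_model.eval()
```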
@@ -354,6 +352,7 @@ def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:"):
 
     return response_text
 
+
 # Instantiate the Tokenizer
 tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t", token=hf_token, trust_remote_code=True, padding_side="left")
 # tokenizer = AutoTokenizer.from_pretrained("Tonic/stablemed", trust_remote_code=True, padding_side="left")
@@ -370,18 +369,20 @@ class ChatBot:
     def __init__(self):
         self.history = []
 
-    def
+    def doctor(self, user_input, system_prompt="You are an expert medical analyst:"):
         formatted_input = f"{system_prompt}{user_input}"
         user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt")
         response = peft_model.generate(input_ids=user_input_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)
         response_text = tokenizer.decode(response[0], skip_special_tokens=True)
         return response_text
 
+
 bot = ChatBot()
 
+
 def process_summary_with_stablemed(summary):
     system_prompt = "You are a medical instructor . Assess and describe the proper options to your students in minute detail. Propose a course of action for them to base their recommendations on based on your description."
-    response_text = bot.
+    response_text = bot.doctor(summary, system_prompt)
     return response_text
 
 
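With the method now named `doctor`, `process_summary_with_stablemed` and direct calls share one path. A short usage sketch; it assumes the module-level `tokenizer` and `peft_model` above have been loaded, and the summary text is a placeholder.

```python
bot = ChatBot()

summary = "55-year-old with a persistent dry cough and low-grade fever."  # placeholder input
system_prompt = "You are a medical instructor . Assess and describe the proper options to your students in minute detail."

# Same call path the hunk wires into process_summary_with_stablemed.
answer = bot.doctor(summary, system_prompt)
print(answer)
```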
@@ -391,11 +392,9 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
     try:
 
         combined_text = ""
-        image_description = ""
         markdown_output = ""
         image_text = ""
-
-        audio_output = ""
+
         # Debugging print statement
         print(f"Image Input Type: {type(image_input)}, Audio Input Type: {type(audio_input)}")
 
@@ -463,6 +462,7 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
     except Exception as e:
         return f"Error occurred during processing: {e}. No hallucination evaluation.", None
 
+
 welcome_message = """
 # 👋🏻Welcome to ⚕🗣️😷MultiMed - Access Chat ⚕🗣️😷
 
@@ -599,12 +599,12 @@ def create_interface():
 
         with gr.Accordion("Use Voice", open=False) as voice_accordion:
             audio_input = gr.Audio(label="Speak")
-            audio_output = gr.Markdown(label="Output text") # Markdown component for audio
+            # audio_output = gr.Markdown(label="Output text") # Markdown component for audio
             gr.Examples([["audio1.wav"],["audio2.wav"],],inputs=[audio_input])
 
         with gr.Accordion("Use a Picture", open=False) as picture_accordion:
             image_input = gr.Image(label="Upload image")
-            image_output = gr.Markdown(label="Output text") # Markdown component for image
+            # image_output = gr.Markdown(label="Output text") # Markdown component for image
             gr.Examples([["image1.png"], ["image2.jpeg"], ["image3.jpeg"],],inputs=[image_input])
 
         with gr.Accordion("MultiMed", open=False) as multimend_accordion:
@@ -632,5 +632,6 @@ def create_interface():
 
     return iface
 
+
 iface = create_interface()
 iface.launch(show_error=True, debug=True)