Tonic committed
Commit d2a2154 · 1 Parent(s): d738eed

Update app.py

Files changed (1)
  1. app.py +30 -15

app.py CHANGED
@@ -24,7 +24,7 @@ welcome_message = """
 
 🗣️📝Interact with ⚕🗣️😷MultiMed⚕ in any language using image, audio or text!
 
-📚🌟💼 that uses [Tonic/mistralmed](https://huggingface.co/Tonic/mistralmed) and [adept/fuyu-8B](https://huggingface.co/adept/fuyu-8b) with [Vectara](https://huggingface.co/vectara) embeddings + retrieval w/ [Facebook/Seamless-m4t](https://huggingface.co/facebook/hf-seamless-m4t-large) for audio translation & accessibility.
+📚🌟💼 that uses [allenai/tulu-2-dpo-13b](https://huggingface.co/allenai/tulu-2-dpo-13b) & [Tonic/mistralmed](https://huggingface.co/Tonic/mistralmed) and [adept/fuyu-8B](https://huggingface.co/adept/fuyu-8b) with [Vectara](https://huggingface.co/vectara) embeddings + retrieval w/ [Facebook/Seamless-m4t](https://huggingface.co/facebook/hf-seamless-m4t-large) for audio translation & accessibility.
 do [get in touch](https://discord.gg/GWpVpekp). You can also use 😷MultiMed⚕️ on your own data & in your own way by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/TeamTonic/MultiMed?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
 ### Join us :
 
@@ -76,7 +76,7 @@ languages = {
 components = {}
 dotenv.load_dotenv()
 seamless_client = Client("facebook/seamless_m4t")
-mistralmed_client = Client("https://tonic1-mistralmed-chat.hf.space/--replicas/crzkn/")
+tulu_client = Client("https://tonic1-tulu.hf.space/--replicas/xh5ff/")
 HuggingFace_Token = os.getenv("HuggingFace_Token")
 hf_token = os.getenv("HuggingFace_Token")
 device = "cuda" if torch.cuda.is_available() else "cpu"
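
Reviewer note on the hunk above: the new `tulu_client` is pinned to a specific replica URL (`--replicas/xh5ff/`), which typically stops resolving once the Space restarts, and this module-level instance is shadowed by a second `Client(...)` constructed inside `process_summary_with_tulu` later in this diff. Below is a minimal sketch of a more durable construction, assuming the demo is also reachable by a Space id (the id `Tonic1/Tulu` is an assumption, not confirmed by the commit) and reusing the same `gradio_client.Client` class app.py already imports:

```python
import os

from gradio_client import Client  # same client class app.py already uses

# Sketch only: prefer a Space id over a pinned replica URL, and fall back to the
# URL from this commit if the id lookup fails. "Tonic1/Tulu" is an assumed id.
def make_tulu_client() -> Client:
    token = os.getenv("HuggingFace_Token")
    try:
        return Client("Tonic1/Tulu", hf_token=token)
    except Exception as exc:
        print(f"Space id lookup failed ({exc}); falling back to the replica URL")
        return Client("https://tonic1-tulu.hf.space/--replicas/xh5ff/", hf_token=token)

tulu_client = make_tulu_client()
```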
@@ -363,19 +363,34 @@ def query_vectara(text):
         return f"Error: {response.status_code}"
 
 
-def process_summary_with_stablemed(summary):
-    system_prompt = "You are a medical instructor. Assess and describe the proper options to your students in minute detail. Propose a course of action for them to base their recommendations on based on your description."
-    # Use the Mistral Med Gradio client API call
-    result = mistralmed_client.predict(
-        summary,        # Summary text
-        system_prompt,  # System prompt
-        api_name="/predict"
+def process_summary_with_tulu(summary):
+    # Define the parameters for the Tulu Gradio client API call
+    your_message = summary
+    assistant_message = "You are a medical instructor. Assess and describe the proper options to your students in minute detail. Propose a course of action for them to base their recommendations on based on your description."
+    max_new_tokens = 650
+    temperature = 0.4
+    top_p = 0.9
+    repetition_penalty = 0.9
+    advanced = True
+
+    # Initialize the Tulu client
+    tulu_client = Client("https://tonic1-tulu.hf.space/--replicas/xh5ff/")
+
+    # Make the API call
+    result = tulu_client.predict(
+        your_message,
+        assistant_message,
+        max_new_tokens,
+        temperature,
+        top_p,
+        repetition_penalty,
+        advanced,
+        fn_index=0
     )
-    # Assuming the result is the response text
+
+    # Process the result
     response_text = result if isinstance(result, str) else "Error in processing"
     return response_text
-
-# Main function to handle the Gradio interface logic
 
 def process_and_query(input_language=None, audio_input=None, image_input=None, text_input=None):
     try:
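
Before wiring the new helper into `process_and_query`, it can be exercised on its own against the live Space. A minimal usage sketch, assuming app.py (with its `Client` setup) is importable as a module and the Space is up; the sample summary text is made up for illustration:

```python
# Usage sketch: call the new helper directly with a made-up Vectara-style summary.
from app import process_summary_with_tulu

sample_summary = (
    "Patient reports a persistent dry cough, low-grade fever for five days, "
    "and mild shortness of breath on exertion."
)

response = process_summary_with_tulu(sample_summary)
print(response)  # the Tulu completion, or "Error in processing" on a non-string result
```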
@@ -433,8 +448,8 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
         for source in sources_info:
             markdown_output += f"* {source}\n"
 
-        # Process the summary with Stablemed
-        final_response = process_summary_with_stablemed(summary)
+        # Process the summary with Tulu
+        final_response = process_summary_with_tulu(summary)
 
         # Convert translated text to speech and get both audio file and text
         target_language = "English"  # Set the target language for the speech
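
One behaviour the hunk above leaves unchanged: if the remote call fails, `process_summary_with_tulu` returns the literal string "Error in processing", which still flows into the speech conversion and hallucination check below. A hedged sketch of a guard that could sit at this point; falling back to the Vectara summary is an assumed policy, not something this commit does:

```python
# Sketch only: guard the remote response before it reaches the TTS and
# hallucination-evaluation steps. The fallback to `summary` is an assumption.
final_response = process_summary_with_tulu(summary)
if not final_response or final_response == "Error in processing":
    final_response = summary  # keep the pipeline going with the retrieved summary
```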
@@ -444,7 +459,7 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
         hallucination_label = evaluate_hallucination(final_response, summary)
 
         # Add final response and hallucination label to Markdown output
-        markdown_output += "\n### Processed Summary with StableMed\n"
+        markdown_output += "\n### Processed Summary with Tulu\n"
         markdown_output += final_response + "\n"
         markdown_output += "\n### Hallucination Evaluation\n"
         markdown_output += f"* **Label**: {hallucination_label}\n"
 