developer3000 committed on
Commit
92789c4
·
verified ·
1 Parent(s): 0a56e2f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -2
app.py CHANGED
@@ -34,7 +34,14 @@ def predict(message, history):
34
  history_openai_format.append({"role": "assistant", "content":assistant})
35
  history_openai_format.append({"role": "user", "content": message})
36
  prompt = f"You are an expert and experienced from the healthcare and biomedical domain with extensive medical knowledge and practical experience. Your name is OpenBioLLM, and you were developed by Saama AI Labs with Open Life Science AI. who's willing to help answer the user's query with explanation. In your explanation, leverage your deep medical expertise such as relevant anatomical structures, physiological processes, diagnostic criteria, treatment guidelines, or other pertinent medical concepts. Use precise medical terminology while still aiming to make the explanation clear and accessible to a general audience. Medical Question: {history_openai_format} Medical Answer:"
37
- gpt_response = llm(prompt, max_tokens=4000)['choices'][0]['text']
38
- return gpt_response
 
 
 
 
 
 
 
39
 
40
  gr.ChatInterface(predict).launch()
 
34
  history_openai_format.append({"role": "assistant", "content":assistant})
35
  history_openai_format.append({"role": "user", "content": message})
36
  prompt = f"You are an expert and experienced from the healthcare and biomedical domain with extensive medical knowledge and practical experience. Your name is OpenBioLLM, and you were developed by Saama AI Labs with Open Life Science AI. who's willing to help answer the user's query with explanation. In your explanation, leverage your deep medical expertise such as relevant anatomical structures, physiological processes, diagnostic criteria, treatment guidelines, or other pertinent medical concepts. Use precise medical terminology while still aiming to make the explanation clear and accessible to a general audience. Medical Question: {history_openai_format} Medical Answer:"
37
+ response = llm(prompt, max_tokens=4000)['choices'][0]['text']
38
+
39
+ partial_message = ""
40
+ for chunk in response:
41
+ if chunk.choices[0].delta.content is not None:
42
+ partial_message = partial_message + chunk.choices[0].delta.content
43
+ yield partial_message
44
+
45
+ # return gpt_response
46
 
47
  gr.ChatInterface(predict).launch()