Update app.py
app.py CHANGED
@@ -1,77 +1,9 @@
 # Necessary Imports
-import os
-import time
-import base64
-import PIL.Image
 import gradio as gr
-import google.generativeai as genai
 
-from dotenv import load_dotenv
-
-
-load_dotenv()
-
-# Set the Gemini API Key
-genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))
-
-
-# Set up the model configuration for content generation
-generation_config = {
-    "temperature": 0.4,
-    "top_p": 1,
-    "top_k": 32,
-    "max_output_tokens": 1400,
-}
-
-# Define safety settings for content generation
-safety_settings = [
-    {"category": f"HARM_CATEGORY_{category}", "threshold": "BLOCK_MEDIUM_AND_ABOVE"}
-    for category in [
-        "HARASSMENT",
-        "HATE_SPEECH",
-        "SEXUALLY_EXPLICIT",
-        "DANGEROUS_CONTENT",
-    ]
-]
-
-
-# System Prompt
-system_prompt = """
-As a trusted medical chatbot, your role is crucial in providing accurate information and guidance to users seeking assistance in reducing preventable deaths of newborns and children under 5 years of age, as well as supporting the health and well-being of pregnant mothers and women. Your focus will be on addressing queries related to neonatal and under-five mortality rates, maternal health, and women's health issues, offering insights and recommendations to support these global health goals.
-
-**Analysis Guidelines:**
-
-1. **Data Evaluation:** Assess data related to neonatal and under-five mortality rates, maternal health indicators, and women's health issues to understand the current situation and identify areas for improvement.
-2. **Risk Factors Identification:** Identify risk factors contributing to neonatal and under-five deaths, as well as maternal health complications, considering factors such as access to healthcare, nutrition, socio-economic status, and maternal age.
-3. **Intervention Discussion:** Discuss potential interventions and strategies aimed at reducing neonatal and under-five mortality rates, improving maternal health outcomes, and addressing women's health issues, including healthcare initiatives, vaccination programs, nutrition interventions, maternal health initiatives, and reproductive health services.
-4. **Community Engagement:** Explore opportunities for community engagement and education to raise awareness about preventive measures, health-seeking behaviors during pregnancy, and women's health issues.
-5. **Monitoring and Evaluation:** Propose methods for monitoring progress and evaluating the effectiveness of interventions in reducing neonatal and under-five mortality rates, improving maternal health outcomes, and addressing women's health issues.
-6. **Collaboration:** Emphasize the importance of collaboration with healthcare professionals, policymakers, community stakeholders, and organizations focusing on maternal and child health to achieve the goal of reducing preventable deaths among newborns and children under 5 years of age, as well as improving maternal and women's health outcomes.
-
-**Refusal Policy:**
-If the user provides information not related to reducing neonatal and under-five mortality rates, maternal health, or women's health issues, kindly inform them that this chatbot is designed to address queries specific to these global health goals. Encourage them to seek assistance from appropriate sources for other inquiries.
-
-Your role as a medical chatbot is to provide valuable insights and recommendations to support efforts in reducing preventable deaths of newborns and children under 5 years of age, as well as improving maternal and women's health outcomes. Proceed to assist users with their queries, ensuring clarity, empathy, and accuracy in your responses.
-
-"""
-
-
-# Define the model name
-model_name = "gemini-1.5-pro"
-
-# Create the Gemini Models for Text and Vision respectively
-txt_model = genai.GenerativeModel(
-    model_name=model_name,
-    generation_config=generation_config,
-    safety_settings=safety_settings,
-    system_instruction=system_prompt,
-)
-vis_model = genai.GenerativeModel(
-    model_name=model_name,
-    generation_config=generation_config,
-    safety_settings=safety_settings,
-    system_instruction=system_prompt,
-)
+# Import the necessary functions from the src folder
+from src.chat import query_message
+from src.llm_response import llm_response
 
 
 # HTML Content for the Interface
@@ -96,91 +28,6 @@ DESCRIPTION = """
 """
 
 
-# Image to Base 64 Converter Function
-def image_to_base64(image_path):
-    """
-    Convert an image file to a base64 encoded string.
-
-    Args:
-        image_path (str): The path to the image file.
-
-    Returns:
-        str: The base64 encoded string representation of the image.
-    """
-    # Open Image and Encode it to Base64
-    with open(image_path, "rb") as img:
-        encoded_string = base64.b64encode(img.read())
-
-    # Return the Encoded String
-    return encoded_string.decode("utf-8")
-
-
-# Function that takes User Inputs and displays it on ChatUI
-def query_message(history, txt, img):
-    """
-    Adds a query message to the chat history.
-
-    Parameters:
-        history (list): The chat history.
-        txt (str): The text message.
-        img (str): The image file path.
-
-    Returns:
-        list: The updated chat history.
-    """
-    if not img:
-        history += [(txt, None)]
-        return history
-
-    # Convert Image to Base64
-    base64 = image_to_base64(img)
-
-    # Display Image on Chat UI and return the history
-    data_url = f"data:image/jpeg;base64,{base64}"
-    history += [(f"{txt} ![]({data_url})", None)]
-    return history
-
-
-# Function that takes User Inputs, generates Response and displays on Chat UI
-def llm_response(history, text, img):
-    """
-    Generate a response based on the input.
-
-    Parameters:
-        history (list): A list of previous chat history.
-        text (str): The input text.
-        img (str): The path to an image file (optional).
-    """
-
-    # Convert chat history to string for context
-    history_str = "\n".join(
-        [
-            f"User: {msg[0]}\nBot: {msg[1]}" if msg[1] else f"User: {msg[0]}"
-            for msg in history
-        ]
-    )
-
-    # Generate Response based on the Input
-    if not img:
-        # response = txt_model.generate_content(f"{system_prompt}User: {text}")
-        chat_session = txt_model.start_chat(history=[])
-        response = chat_session.send_message(f"History:\n{history_str}\nUser: {text}")
-    else:
-        # Open Image and Generate Response
-        img = PIL.Image.open(img)
-        chat_session = vis_model.start_chat(history=[])
-        response = chat_session.send_message([f"User: {text}", img])
-
-    # response = vis_model.generate_content([f"{system_prompt}User: {text}", img])
-
-    # Display Response on Chat UI and return the history
-    history += [(None, response.text)]
-    return history
-
-
 # Interface Code using Gradio
 with gr.Blocks(theme=gr.themes.Soft()) as app:
 
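The updated app.py imports query_message from src.chat, but that module is not part of this diff. The sketch below is a guess at what src/chat.py presumably contains, assuming the image_to_base64 helper and query_message function deleted above were moved there largely unchanged; the local variable name img_b64 is mine (the original code shadowed the base64 module).

# src/chat.py -- minimal sketch, not shown in this diff
import base64


def image_to_base64(image_path):
    """Convert an image file to a base64 encoded string."""
    with open(image_path, "rb") as img:
        encoded_string = base64.b64encode(img.read())
    return encoded_string.decode("utf-8")


def query_message(history, txt, img):
    """Add the user's text (and optional image) to the chat history."""
    if not img:
        history += [(txt, None)]
        return history

    # Embed the uploaded image in the chat message as a data URL
    img_b64 = image_to_base64(img)
    data_url = f"data:image/jpeg;base64,{img_b64}"
    history += [(f"{txt} ![]({data_url})", None)]
    return history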
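Likewise, llm_response now comes from src.llm_response, which the diff also does not show. The following is a minimal sketch under the assumption that the deleted Gemini setup (API key configuration, generation_config, safety_settings, system prompt, and the gemini-1.5-pro text and vision models) moved into that module together with llm_response; the exact split across src modules is not visible here, and the short system_prompt placeholder stands in for the full medical-chatbot prompt removed from app.py.

# src/llm_response.py -- minimal sketch, not shown in this diff
import os

import PIL.Image
import google.generativeai as genai
from dotenv import load_dotenv

load_dotenv()
genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))

generation_config = {"temperature": 0.4, "top_p": 1, "top_k": 32, "max_output_tokens": 1400}
safety_settings = [
    {"category": f"HARM_CATEGORY_{category}", "threshold": "BLOCK_MEDIUM_AND_ABOVE"}
    for category in ["HARASSMENT", "HATE_SPEECH", "SEXUALLY_EXPLICIT", "DANGEROUS_CONTENT"]
]
system_prompt = "..."  # placeholder for the full medical-chatbot system prompt removed from app.py

# Text and vision models share the same configuration and system instruction
txt_model = genai.GenerativeModel(
    model_name="gemini-1.5-pro",
    generation_config=generation_config,
    safety_settings=safety_settings,
    system_instruction=system_prompt,
)
vis_model = genai.GenerativeModel(
    model_name="gemini-1.5-pro",
    generation_config=generation_config,
    safety_settings=safety_settings,
    system_instruction=system_prompt,
)


def llm_response(history, text, img):
    """Generate a Gemini response and append it to the chat history."""
    # Flatten the prior turns into a plain-text context string
    history_str = "\n".join(
        f"User: {msg[0]}\nBot: {msg[1]}" if msg[1] else f"User: {msg[0]}" for msg in history
    )

    if not img:
        chat_session = txt_model.start_chat(history=[])
        response = chat_session.send_message(f"History:\n{history_str}\nUser: {text}")
    else:
        image = PIL.Image.open(img)
        chat_session = vis_model.start_chat(history=[])
        response = chat_session.send_message([f"User: {text}", image])

    history += [(None, response.text)]
    return history

If this is indeed the shape of the refactor, the design choice is straightforward: app.py keeps only the Gradio interface, while all Gemini configuration and chat logic live under src/, so the UI layer no longer needs the google-generativeai, dotenv, or PIL imports it previously carried.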