muhammadahmedrayyan commited on
Commit
9cc27af
·
verified ·
1 Parent(s): 5c29d5d

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +347 -0
app.py ADDED
@@ -0,0 +1,347 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from streamlit_option_menu import option_menu
3
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
4
+
5
# Set page configuration
# Wide layout with the sidebar expanded by default. Streamlit requires
# set_page_config() to be the first Streamlit command in the script.
st.set_page_config(
    page_title="VitalCare GPT",
    layout="wide",
    initial_sidebar_state="expanded"
)
11
+
12
# Load Hugging Face models and tokenizers
@st.cache_resource
def load_models():
    """Download and cache the five medical text-generation models.

    Returns:
        A 5-tuple ``(pastelmed_pipeline, lynxmed_pair, neuramed_pair,
        skyemed_pair, clixmed_pair)`` where each *pair* is a
        ``(tokenizer, model)`` tuple. ``@st.cache_resource`` ensures the
        expensive Hub downloads happen only once per process.
    """
    def _pair(repo_id):
        # Load the tokenizer+model couple for one Hub repository.
        return (AutoTokenizer.from_pretrained(repo_id),
                AutoModelForCausalLM.from_pretrained(repo_id))

    # PastelMed is exposed directly as a ready-made pipeline; the other
    # four are returned as raw (tokenizer, model) pairs for create_pipelines().
    pipe_disease = pipeline("text-generation", model="harishussain12/PastelMed")
    return (pipe_disease,
            _pair("harishussain12/LynxMed"),
            _pair("harishussain12/NeuraMed"),
            _pair("harishussain12/SkyeMed"),
            _pair("harishussain12/ClixMed"))
29
+
30
# Function to create pipelines for all models
@st.cache_resource
def create_pipelines():
    """Build one text-generation pipeline per available model.

    Returns:
        dict mapping model name ("PastelMed", "LynxMed", "NeuraMed",
        "SkyeMed", "ClixMed") to a transformers text-generation pipeline.
    """
    # load_models() yields the PastelMed pipeline first, then the
    # (tokenizer, model) pairs in LynxMed/NeuraMed/SkyeMed/ClixMed order.
    pipe_disease, *pairs = load_models()
    pipes = {"PastelMed": pipe_disease}
    for name, (tokenizer, model) in zip(("LynxMed", "NeuraMed", "SkyeMed", "ClixMed"), pairs):
        pipes[name] = pipeline("text-generation", model=model, tokenizer=tokenizer)
    return pipes
46
+
47
# Load pipelines
# Built once at import time; reruns reuse Streamlit's resource cache.
pipelines = create_pipelines()
49
+
50
# Sidebar with navigation
with st.sidebar:
    # streamlit-option-menu renders the nav bar; the returned `selected`
    # value ("Home" / "Spaces" / "About") drives the page routing below.
    selected = option_menu(
        menu_title=None,  # Remove the Navigation title
        options=["Home", "Spaces", "About"],
        icons=["house", "search", "info-circle"],
        menu_icon="cast",
        default_index=0,  # "Home" is pre-selected on first load
        styles={
            # Dark blue-grey container with bold white links; the active
            # link gets a darker navy background.
            "container": {"padding": "0!important", "background-color": "#3e4a5b"},
            "icon": {"color": "#ffffff", "font-size": "16px"},
            "nav-link": {
                "font-size": "15px",
                "text-align": "left",
                "margin": "0px",
                "color": "#ffffff",
                "font-weight": "bold",
                "padding": "10px 20px",
            },
            "nav-link-selected": {"background-color": "#0b2545", "color": "white"},
        }
    )
72
+
73
# Initialize session state for chat history.
# "home_chat_history" backs the Home tab; "chat_history" maps each role
# space (Doctor/Nutritionist/Pharmacist) to its own message list.
for _key, _default in (("home_chat_history", []), ("chat_history", {})):
    if _key not in st.session_state:
        st.session_state[_key] = _default
79
+
80
# Define role-specific keywords or categories.
# A query is considered relevant to a role when any of these lowercase
# keywords occurs as a substring of the lowercased query.
role_keywords = {
    "Doctor": [
        "symptoms", "diagnosis", "treatment", "disease", "medical condition",
        "prescription", "procedure", "surgery", "consultation", "therapy",
        "prognosis", "clinical", "specialist", "check-up", "imaging",
        "laboratory tests", "pathology", "epidemiology", "anatomy", "physiology",
    ],
    "Nutritionist": [
        "diet", "nutrition", "meal plan", "calories", "weight management",
        "vitamins", "minerals", "protein", "carbohydrates", "fats",
        "healthy eating", "lifestyle", "food", "allergies", "deficiencies",
        "hydration", "superfoods", "balanced diet", "supplements", "recipes",
    ],
    "Pharmacist": [
        "medication", "dosage", "side effects", "drug", "pharmacy",
        "prescription", "over-the-counter", "interaction", "refill",
        "formulation", "pharmacology", "pharmaceutical", "compounding",
        "instructions", "contraindications", "storage", "expiry",
        "dispense", "pharmacist advice", "generic drugs", "medicine",
    ],
}

# Role prompts: system-style instructions intended to steer the model
# for each space.
role_prompts = {
    "Doctor": """
You are assisting as a doctor.
Tasks:
- Answer medical questions concisely and accurately.
- Respond with: "I don't know about it" if the query is not related to the medical field.
""",
    "Nutritionist": """
You are assisting as a nutritionist.
Tasks:
- Provide dietary advice based on queries.
- Suggest meal plans, calorie intake, and balanced diets.
- Respond with: "I don't know about it" if the query is not related to nutrition.
""",
    "Pharmacist": """
You act as a pharmacist.
Tasks:
- Provide details on medications, dosages, and side effects.
- Respond with: "I don't know about it" if unrelated to medicine.
""",
}


# Function to check if query matches the role
def is_query_relevant(role, query):
    """Return True if *query* contains any keyword registered for *role*.

    Matching is case-insensitive substring containment; a role with no
    registered keywords (unknown role) never matches.
    """
    lowered = query.lower()
    for keyword in role_keywords.get(role, []):
        if keyword in lowered:
            return True
    return False
125
+
126
# Main content based on navigation
if selected == "Home":
    # 1:2:1 column layout — all content lives in the wide middle column.
    col1, col2, col3 = st.columns([1, 2, 1])

    with col2:
        st.markdown("<h1 style='text-align: center;'>VitalCare GPT</h1>", unsafe_allow_html=True)
        st.markdown("<h3 style='text-align: center;'>How can I assist with your medical queries today?</h3>", unsafe_allow_html=True)

        # Display chat history for Home section above input
        # (replayed from session state on every Streamlit rerun).
        for message in st.session_state['home_chat_history']:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

        # Model selection
        model_selection = st.selectbox(
            "Select a model",
            options=["SkyeMed", "NeuraMed", "PastelMed", "LynxMed", "ClixMed"],
            index=0
        )

        # Search box (label collapsed; placeholder acts as the prompt)
        search_input = st.text_input(
            "",
            placeholder="Type your medical question here...",
            label_visibility="collapsed",
            help="Ask anything related to medical knowledge."
        )

        # NOTE(review): this fires on every rerun while the text box is
        # non-empty, so changing any other widget regenerates a response
        # for the same text — confirm whether that is intended.
        if search_input:
            with st.spinner("Generating response..."):
                try:
                    query_input = search_input
                    # Generate with the chosen pipeline; response is a list of
                    # dicts with a 'generated_text' key.
                    response = pipelines[model_selection](query_input, max_length=200, num_return_sequences=1)

                    # Save the user and assistant messages to chat history
                    st.session_state['home_chat_history'].append({"role": "user", "content": search_input})
                    st.session_state['home_chat_history'].append({"role": "assistant", "content": response[0]['generated_text']})

                    # Display the generated response
                    st.markdown(f"### Response:\n{response[0]['generated_text']}")

                except Exception as e:
                    st.error(f"Error generating response: {str(e)}")
169
+
170
+ elif selected == "Spaces":
171
+ st.markdown("<h1>Spaces</h1>", unsafe_allow_html=True)
172
+
173
+ # Layout for space buttons
174
+ col1, col2, col3 = st.columns(3)
175
+ with col1:
176
+ if st.button("Doctor", key="doctor", use_container_width=True):
177
+ st.session_state.selected_role = "Doctor"
178
+ with col2:
179
+ if st.button("Nutritionist", key="nutritionist", use_container_width=True):
180
+ st.session_state.selected_role = "Nutritionist"
181
+ with col3:
182
+ if st.button("Pharmacist", key="pharmacist", use_container_width=True):
183
+ st.session_state.selected_role = "Pharmacist"
184
+
185
+ # Display the selected role
186
+ if "selected_role" in st.session_state:
187
+ selected_role = st.session_state.selected_role
188
+ st.markdown(f"<h2>Selected Space: {selected_role}</h2>", unsafe_allow_html=True)
189
+
190
+ # Initialize chat history for the selected role if not already done
191
+ if selected_role not in st.session_state['chat_history']:
192
+ st.session_state['chat_history'][selected_role] = []
193
+
194
+ # Display chat history for the selected role
195
+ for message in st.session_state['chat_history'][selected_role]:
196
+ with st.chat_message(message["role"]):
197
+ st.markdown(message["content"])
198
+
199
+ # Add model selection dropdown
200
+ model_selection = st.selectbox(
201
+ "Select a model",
202
+ options=["SkyeMed", "NeuraMed", "PastelMed", "LynxMed", "ClixMed"],
203
+ index=0
204
+ )
205
+
206
+ # Align query input and button on the same line
207
+ query_col1, query_col2 = st.columns([4, 1])
208
+ with query_col1:
209
+ query = st.text_input(
210
+ f"Enter your query as a {selected_role.lower()}:",
211
+ placeholder="Type your question here...",
212
+ label_visibility="collapsed"
213
+ )
214
+ with query_col2:
215
+ generate_clicked = st.button("Generate Response", key="generate_button")
216
+
217
+ if generate_clicked:
218
+ if query.strip():
219
+ with st.spinner("Generating response..."):
220
+ try:
221
+ # Check query relevance for the selected role
222
+ if not is_query_relevant(selected_role, query):
223
+ response_text = f"As a {selected_role.lower()}, I cannot answer this question."
224
+ else:
225
+ # Generate response using the selected model
226
+ role_prompt = role_prompts.get(selected_role, "")
227
+ formatted_query = f"\n\nquery: {query}\n"
228
+ response = pipelines[model_selection](formatted_query, max_length=200, num_return_sequences=1)
229
+ response_text = response[0]['generated_text']
230
+
231
+ # Save user and assistant messages to the selected role's chat history
232
+ st.session_state['chat_history'][selected_role].append({"role": "user", "content": query})
233
+ st.session_state['chat_history'][selected_role].append({"role": "assistant", "content": response_text})
234
+
235
+ # Display the response
236
+ st.markdown(f"### Response:\n{response_text}")
237
+
238
+ except Exception as e:
239
+ st.error(f"Error generating response: {str(e)}")
240
+ else:
241
+ st.warning("Please enter a query before generating a response.")
242
+
243
elif selected == "About":
    # Static informational page; no model interaction here.
    st.markdown("<h1>About VitalCare GPT</h1>", unsafe_allow_html=True)
    st.markdown(
        """
VitalCare GPT is an advanced AI-powered platform designed to provide reliable and accurate medical insights, enabling users to access information related to healthcare and wellness effortlessly. Powered by cutting-edge language models, VitalCare GPT specializes in various domains, including general medical advice, nutritional guidance, and pharmaceutical expertise.

Whether you're looking for symptoms analysis, dietary recommendations, or medication details, our platform empowers users to interact seamlessly with AI models trained on specific medical and healthcare-related datasets. VitalCare GPT offers dedicated spaces for doctors, nutritionists, and pharmacists, ensuring tailored responses to your queries.
"""
    )
252
+
253
# Footer at the bottom with centered text, and adjusted when sidebar is toggled
# Raw HTML/CSS injected via st.markdown: a fixed, transparent disclaimer
# centered at the bottom of the viewport on all pages.
st.markdown("""
<style>
.footer {
    position: fixed;
    bottom: 10px;
    left: 50%;
    transform: translateX(-50%);
    color: white;
    padding: 8px; /* Reduced padding */
    border-radius: 10px;
    font-size: 12px; /* Smaller font size */
    text-align: center;
    z-index: 1000;
    background-color: transparent;
}

/* Adjust position based on sidebar */
.footer-container {
    display: flex;
    justify-content: center;
    align-items: center;
    position: fixed;
    bottom: 10px;
    left: 50%;
    transform: translateX(-50%);
    width: 100%;
}

@media screen and (max-width: 900px) {
    .footer {
        position: fixed;
        left: 50%;
        transform: translateX(-50%);
    }
}
</style>

<div class="footer-container">
    <div class="footer">
        This GPT may take time to generate responses and may have lower accuracy.
    </div>
</div>
""", unsafe_allow_html=True)
297
+
298
# Floating question mark icon with tooltip
# Raw HTML/CSS: a small fixed "?" badge bottom-right; hovering reveals a
# credits tooltip (CSS sibling selector, no JavaScript).
st.markdown("""
<style>
/* Floating Question Mark Icon */
.help-icon {
    position: fixed;
    bottom: 10px;
    right: 10px;
    background-color: #333;
    color: white;
    font-size: 14px; /* Even smaller font size */
    border-radius: 50%;
    padding: 6px; /* Smaller padding */
    width: 30px; /* Smaller width */
    height: 30px; /* Smaller height */
    display: flex;
    align-items: center;
    justify-content: center;
    cursor: pointer;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.2);
    z-index: 1000;
}

/* Tooltip content when hovering */
.help-tooltip {
    position: fixed;
    bottom: 50px;
    right: 10px;
    background-color: rgba(0, 0, 0, 0.8);
    color: white;
    padding: 6px; /* Smaller padding */
    border-radius: 10px;
    font-size: 12px; /* Smaller font size */
    display: none;
    z-index: 1000;
}

.help-icon:hover + .help-tooltip,
.help-tooltip:hover {
    display: block;
}
</style>

<!-- Help icon and tooltip -->
<div class="help-icon">?</div>
<div class="help-tooltip">
    Developed by<br>
    Rayyan & Haris
</div>
""", unsafe_allow_html=True)