Shanulhaq committed on
Commit
ea36423
·
verified ·
1 Parent(s): e72bbd9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +121 -70
app.py CHANGED
@@ -1,35 +1,62 @@
 
 
1
  import streamlit as st
2
- from transformers import pipeline
 
3
  import whisper
4
  from gtts import gTTS
5
  import tempfile
6
- import os
7
- import logging
8
  from pydub import AudioSegment
9
- import openai
 
 
 
 
 
 
 
10
 
11
  # Set up logging
12
  logging.basicConfig(level=logging.INFO)
13
  logger = logging.getLogger(__name__)
14
 
15
- # Load Hugging Face model for text generation (instead of Google Cloud)
16
- def load_hf_model():
17
- # Load a model for heart health-related questions
18
- return pipeline("text-generation", model="gpt2")
19
-
20
- # Load Whisper model for transcription
21
- def load_whisper_model():
22
- return whisper.load_model("base")
23
-
24
- # Function to generate response using Hugging Face model
25
- def generate_hf_response(model, prompt):
26
- result = model(prompt, max_length=100, num_return_sequences=1)
27
- return result[0]["generated_text"]
28
-
29
- # Function to process audio input using Whisper and Hugging Face
30
- def process_audio(audio_file, hf_model, whisper_model):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  try:
32
- # Transcribe audio using Whisper
33
  result = whisper_model.transcribe(audio_file)
34
  user_text = result['text']
35
  logger.info(f"Transcription successful: {user_text}")
@@ -38,15 +65,17 @@ def process_audio(audio_file, hf_model, whisper_model):
38
  return "Error in transcribing audio.", None
39
 
40
  try:
41
- # Generate response using Hugging Face model
42
- response_text = generate_hf_response(hf_model, user_text)
43
- logger.info(f"Generated response: {response_text}")
44
- except Exception as e:
45
- logger.error(f"Error in generating response: {e}")
46
- return "Error in generating response.", None
 
 
 
47
 
48
  try:
49
- # Convert the response text to speech
50
  tts = gTTS(text=response_text, lang='en')
51
  audio_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp3')
52
  tts.save(audio_file.name)
@@ -57,50 +86,72 @@ def process_audio(audio_file, hf_model, whisper_model):
57
 
58
  return response_text, audio_file.name
59
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60
  # Main application layout
61
  def main():
62
- st.title("Heart Health & Audio Processing App 🫀🎙️ (Hugging Face Edition)")
63
-
64
- # Load models
65
- hf_model = load_hf_model()
66
- whisper_model = load_whisper_model()
67
-
68
- # Two tabs: one for the chatbot and one for audio processing
69
- tab1, tab2 = st.tabs(["Heart Health Chatbot", "Audio Processing"])
70
-
71
- # Tab 1: Heart Health Chatbot
72
- with tab1:
73
- st.header("Chat with Heart Health Specialist")
74
-
75
- if "history" not in st.session_state:
76
- st.session_state.history = []
77
-
78
- user_input = st.text_input("Ask about heart health:", placeholder="Type here...")
79
-
80
- if st.button("Send") and user_input:
81
- bot_response = generate_hf_response(hf_model, user_input)
82
- st.session_state.history.append({"role": "user", "content": user_input})
83
- st.session_state.history.append({"role": "bot", "content": bot_response})
84
-
85
- for chat in st.session_state.history:
86
- if chat["role"] == "user":
87
- st.write(f"**You:** {chat['content']}")
88
- else:
89
- st.write(f"**Bot:** {chat['content']}")
90
-
91
- # Tab 2: Audio Processing
92
- with tab2:
93
- st.header("Audio Processing with Whisper and Hugging Face")
94
-
95
- uploaded_audio = st.file_uploader("Upload an audio file for transcription and response", type=["mp3", "wav", "ogg"])
96
-
97
- if uploaded_audio:
98
- with st.spinner("Processing audio..."):
99
- response_text, audio_file_path = process_audio(uploaded_audio, hf_model, whisper_model)
100
-
101
- if response_text:
102
- st.write(f"**Response:** {response_text}")
103
- st.audio(audio_file_path)
104
 
105
  # Run the app
106
  if __name__ == "__main__":
 
1
+ import os
2
+ import logging
3
  import streamlit as st
4
+ import google.generativeai as genai
5
+ from streamlit_chat import message
6
  import whisper
7
  from gtts import gTTS
8
  import tempfile
 
 
9
  from pydub import AudioSegment
10
+ from groq import Groq, GroqError
11
+
12
# Securely configure API keys — read from the environment, never hard-coded.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
GROQ_API_KEY = os.getenv("GROQ_API_KEY")

# Fail fast, consistent with the Groq-key check below: calling
# genai.configure(api_key=None) would otherwise defer the failure to the
# first model call as an opaque authentication error.
if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY is not set.")

# Configure Google Generative AI API
genai.configure(api_key=GOOGLE_API_KEY)
 
19
  # Set up logging
20
  logging.basicConfig(level=logging.INFO)
21
  logger = logging.getLogger(__name__)
22
 
23
# Initialize the Groq client; abort start-up when the key is absent or rejected.
if not GROQ_API_KEY:
    raise ValueError("GROQ_API_KEY is not set.")

try:
    groq_client = Groq(api_key=GROQ_API_KEY)
except GroqError as e:
    # Surface the failure in the log, then let the app crash loudly.
    logger.error(f"Failed to initialize Groq client: {e}")
    raise
else:
    logger.info("Groq API key is set and client is initialized.")
32
+
33
# Load the Whisper "base" speech-to-text model once at import time so every
# request reuses the same instance; a load failure aborts the app.
try:
    whisper_model = whisper.load_model("base")
except Exception as e:
    logger.error(f"Failed to load Whisper model: {e}")
    raise
else:
    logger.info("Whisper model loaded successfully.")
40
+
41
# Initialize Google Generative Model for chatbot.
# The system_instruction pins a fixed heart-specialist persona; all chat
# requests go through this single module-level model instance.
model = genai.GenerativeModel(
    'gemini-1.5-flash',
    system_instruction=(
        "Persona: You are Dr. Assad Siddiqui, a heart specialist. Only provide information related to heart health, symptoms, and advice. "
        "Ask users about their heart-related symptoms and provide consultation and guidance based on their input. "
        "Always provide brief answers. If the inquiry is not related to heart health, politely say that you can only provide heart-related information. "
        "Responses should be in Urdu written in English and in English."
    )
)
51
+
52
# Function to get chatbot response
def get_chatbot_response(user_input):
    """Forward *user_input* to the Gemini model and return its reply, trimmed."""
    return model.generate_content(user_input).text.strip()
56
+
57
# Function to process audio using Whisper and Groq API
def process_audio(audio_file):
    """Transcribe *audio_file*, answer the transcript via Groq, and voice the reply.

    Returns a ``(response_text, mp3_path)`` tuple. On any stage failure the
    first element is a short error message and the second is ``None``.
    NOTE(review): the two intermediate ``except`` bodies were hidden diff
    context and are reconstructed here — confirm messages against callers.
    """
    # 1) Speech -> text with the module-level Whisper model.
    try:
        result = whisper_model.transcribe(audio_file)
        user_text = result['text']
        logger.info(f"Transcription successful: {user_text}")
    except Exception as e:
        logger.error(f"Error in transcribing audio: {e}")
        return "Error in transcribing audio.", None

    # 2) Transcript -> answer via the Groq chat-completion endpoint.
    try:
        chat_completion = groq_client.chat.completions.create(
            messages=[{"role": "user", "content": user_text}],
            model="llama3-8b-8192",
        )
        response_text = chat_completion.choices[0].message.content
        logger.info(f"Received response from Groq API: {response_text}")
    except GroqError as e:
        logger.error(f"Error in generating response with Groq API: {e}")
        return "Error in generating response with Groq API.", None

    # 3) Answer -> speech. Use a distinct variable so the input parameter is
    #    not shadowed, and close the handle before gTTS writes to the path
    #    (writing to a still-open NamedTemporaryFile fails on some platforms).
    try:
        tts = gTTS(text=response_text, lang='en')
        out_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp3')
        out_file.close()
        tts.save(out_file.name)
    except Exception as e:
        logger.error(f"Error in converting text to speech: {e}")
        return "Error in converting text to speech.", None

    return response_text, out_file.name
88
 
89
# Streamlit page configuration — must run before any other st.* call.
st.set_page_config(page_title="Heart Health Chatbot", page_icon="👨‍⚕️", layout="centered")

# Background and header: injects page-wide CSS (background image, chat-bubble
# and avatar styles used by display_chat_history) plus the title banner.
# unsafe_allow_html is required to render raw HTML/CSS via st.markdown.
st.markdown("""
<style>
.stApp { background-image: url('https://cdn.wallpapersafari.com/29/34/8Ak1Sf.png'); background-size: cover; }
.chat-bubble { background-color: #128c7E; color: white; padding: 10px; border-radius: 10px; max-width: 70%; }
.user-bubble { background-color: #075e54; color: white; padding: 10px; border-radius: 10px; max-width: 70%; }
img.avatar { width: 50px; height: 50px; border-radius: 50%; }
</style>
<div style="padding:10px;text-align:center;color:white;">
<h1>Heart Health Chatbot 🫀</h1>
<p>Ask me anything about heart diseases!</p>
</div>
""", unsafe_allow_html=True)
105
+
106
# Initialize session state for chat history so it survives Streamlit reruns.
# Each entry is {"role": "user"|"bot", "content": str}.
if "history" not in st.session_state:
    st.session_state.history = []

# Avatar images used when rendering the chat transcript.
user_avatar_url = "https://img.freepik.com/free-photo/sad-cartoon-anatomical-heart_23-2149767987.jpg"
bot_avatar_url = "https://img.freepik.com/premium-photo/3d-render-man-doctor-avatar-round-sticker-with-cartoon-character-face-user-id-thumbnail.jpg"
112
+
113
# Function to display chat history
def display_chat_history():
    """Render every message in st.session_state.history as an HTML chat bubble.

    Relies on the .user-bubble / .chat-bubble / img.avatar CSS classes injected
    at page setup.
    """
    for chat in st.session_state.history:
        if chat["role"] == "user":
            # User messages: right-aligned bubble with the user avatar.
            st.markdown(f"""
<div style="display: flex; justify-content: flex-end; margin-bottom: 10px;">
<div class="user-bubble"><p><b>You:</b> {chat['content']}</p></div>
<img src="{user_avatar_url}" class="avatar"/>
</div>
""", unsafe_allow_html=True)
        else:
            # Bot messages: left-aligned bubble with the doctor avatar.
            st.markdown(f"""
<div style="display: flex; margin-bottom: 10px;">
<img src="{bot_avatar_url}" class="avatar"/>
<div class="chat-bubble"><p><b>Bot:</b> {chat['content']}</p></div>
</div>
""", unsafe_allow_html=True)
130
+
131
# Main application layout
def main():
    """Drive one Streamlit run: input form, chatbot call, transcript, footer.

    Fix: the previous version called display_chat_history() both before the
    form and again after appending a new exchange, so on every submit the
    prior transcript was rendered twice in the same run. The transcript is now
    rendered exactly once, into a placeholder container declared first so it
    still appears above the input form.
    """
    # Placeholder that keeps the transcript above the form even though it is
    # filled after the form has been processed.
    history_slot = st.container()

    with st.container():
        with st.form(key="user_input_form", clear_on_submit=True):
            user_input = st.text_input("Type your message...", placeholder="Ask about heart health...", max_chars=500)
            submit_button = st.form_submit_button("Send")

        if submit_button and user_input.strip():
            with st.spinner("Thinking..."):
                bot_response = get_chatbot_response(user_input)

            # Update chat history before rendering so the new exchange shows.
            st.session_state.history.append({"role": "user", "content": user_input})
            st.session_state.history.append({"role": "bot", "content": bot_response})

    # Render the full transcript exactly once per run.
    with history_slot:
        display_chat_history()

    # Footer
    st.markdown("""
<p style="text-align:center; color:white; margin-top:50px;">
Check out the <a href="https://live-appointment-chatbot20.zapier.app/" target="_blank" style="color:#34c759;">Live Appointment</a>.
</p>
""", unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
155
 
156
  # Run the app
157
  if __name__ == "__main__":