muhammadshaheryar committed on
Commit
b436fe9
·
verified ·
1 Parent(s): 6298003

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -0
app.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import tempfile
import threading
import time

import openai
import streamlit as st
import whisper
from gtts import gTTS
from IPython.display import Audio
# Streamlit UI setup
st.title("Real-Time Video-Calling AI Avatar Chatbot")

# Load Whisper model for speech-to-text ("base" is the smallest multilingual
# checkpoint; downloaded on first run).
whisper_model = whisper.load_model("base")

# OpenAI API setup: prefer the OPENAI_API_KEY environment variable so the
# secret never lands in source control; the original literal placeholder is
# kept as the fallback for backward compatibility.
openai.api_key = os.getenv('OPENAI_API_KEY', 'YOUR_OPENAI_API_KEY')
18
+ # Text-to-Speech function using gTTS
19
+ def text_to_speech(text):
20
+ tts = gTTS(text=text, lang='en')
21
+ audio_fp = '/tmp/response.mp3'
22
+ tts.save(audio_fp)
23
+ return audio_fp
# Play the audio in the Streamlit page
def play_audio(audio_fp):
    """Render an audio player for the file at *audio_fp*.

    BUG FIX: the original returned an ``IPython.display.Audio`` object,
    which only renders inside a Jupyter/Colab notebook; Streamlit drops it
    silently, so the response was never audible. ``st.audio`` plays the
    file in the app. The widget result is returned for callers that
    inspect it (existing callers ignore the return value).
    """
    return st.audio(audio_fp)
# Get AI response using OpenAI API
def get_ai_response(text):
    """Return the model's completion for a single user utterance.

    Args:
        text: The user's message.

    Returns:
        The AI reply text with surrounding whitespace stripped.
    """
    prompt = f'User: {text}\nAI:'
    # 'text-davinci-003' was retired by OpenAI; 'gpt-3.5-turbo-instruct'
    # is its documented drop-in replacement on the legacy Completion
    # endpoint.
    # NOTE(review): openai>=1.0 removed openai.Completion entirely — this
    # file's style pins the pre-1.0 SDK; confirm the installed version.
    response = openai.Completion.create(
        engine='gpt-3.5-turbo-instruct', prompt=prompt, max_tokens=150
    )
    return response.choices[0].text.strip()
# Function to handle one video-call exchange
def video_call():
    """Run one demo round trip: canned user text -> AI reply -> spoken audio.

    Webcam/microphone capture is not wired up yet; a fixed utterance
    stands in for the user's speech.
    """
    demo_utterance = "Hello AI, how are you?"

    # Ask the model, voice its answer, and surface the audio to the user.
    reply = get_ai_response(demo_utterance)
    speech_file = text_to_speech(reply)
    play_audio(speech_file)
# Entry point: each press of the button runs one request/response round trip.
if st.button("Start Video Call"):
    video_call()