zohaibterminator committed on
Commit
1b10d27
•
1 Parent(s): 12edfef

Update app.py

Files changed (1)
  1. app.py +163 -163
app.py CHANGED
@@ -1,164 +1,164 @@
- import streamlit as st
- from dotenv import load_dotenv
- from audiorecorder import audiorecorder
- from langchain_core.messages import HumanMessage, AIMessage
- import requests
- from transformers import pipeline
- from gtts import gTTS
- import io
- from langchain_core.runnables.base import RunnableSequence
- from langchain_core.prompts import ChatPromptTemplate
- from langchain_groq import ChatGroq
- import os
- import requests
- from dotenv import load_dotenv
- from langgraph.checkpoint.memory import MemorySaver
- from langgraph.prebuilt import create_react_agent
- from langchain_community.tools.tavily_search import TavilySearchResults
-
- st.set_page_config(page_title="Urdu Virtual Assistant", page_icon="🤖") # set the page title and icon
-
- # Load environment variables (if any)
- load_dotenv()
-
- user_id = "1" # example user id
-
- llm = ChatGroq(
-     model="llama-3.1-70b-versatile",
-     temperature=0,
-     max_tokens=None,
-     timeout=None,
-     max_retries=5,
-     groq_api_key=os.getenv("GROQ_API_KEY")
- )
-
- search = TavilySearchResults(
-     max_results=2,
- )
- tools = [search]
-
- agent_executor = create_react_agent(llm, tools)
-
- # Initialize the wav2vec2 model for Urdu speech-to-text
- pipe = pipeline("automatic-speech-recognition", model="kingabzpro/wav2vec2-large-xls-r-300m-Urdu")
-
- def translate(target, text):
-     '''
-     Translates given text into target language
-
-     Parameters:
-         target (string): 2 character code to specify the target language.
-         text (string): Text to be translated.
-
-     Returns:
-         res (string): Translated text.
-     '''
-     url = "https://microsoft-translator-text.p.rapidapi.com/translate"
-
-     querystring = {"api-version":"3.0","profanityAction":"NoAction","textType":"plain", "to":target}
-
-     payload = [{ "Text": text }]
-     headers = {
-         "x-rapidapi-key": os.getenv("RAPIDAPI_LANG_TRANS"),
-         "x-rapidapi-host": "microsoft-translator-text.p.rapidapi.com",
-         "Content-Type": "application/json"
-     }
-
-     response = requests.post(url, json=payload, headers=headers, params=querystring)
-     res = response.json()
-     return res[0]["translations"][0]["text"]
-
-
- def infer(user_input: str):
-     '''
-     Returns the translated response from the LLM in response to a user query.
-
-     Parameters:
-         user_id (string): User ID of a user.
-         user_input (string): User query.
-
-     Returns:
-         res (string): Returns a translated response from the LLM.
-     '''
-
-     user_input = translate("en", user_input) # translate user query to english
-
-     prompt = ChatPromptTemplate.from_messages( # define a prompt
-         [
-             (
-                 "system",
-                 "You are a compassionate and friendly AI virtual assistant. You will provide helpful answers to user queries using the provided tool to ensure the accuracy and relevance of your responses."
-             ),
-             ("human", "{user_input}")
-         ]
-     )
-
-     runnable = prompt | agent_executor # define a chain
-
-     conversation = RunnableSequence( # wrap the chain along with chat history and user input
-         runnable,
-     )
-
-     response = conversation.invoke( # invoke the chain by giving the user input and the chat history
-         {"user_input": user_input},
-     )
-
-     res = translate("ur", response["messages"][-1].content) # translate the response to Urdu
-     return res
-
-
- def text_to_speech(text, lang='ur'):
-     '''
-     Converts text to speech using gTTS.
-
-     Parameters:
-         text (string): Text to be converted to speech.
-         lang (string): Language for the speech synthesis. Default is 'ur' (Urdu).
-     Returns:
-         response_audio_io (BytesIO): BytesIO object containing the audio data.
-     '''
-     tts = gTTS(text, lang=lang)
-     response_audio_io = io.BytesIO()
-     tts.write_to_fp(response_audio_io)
-     response_audio_io.seek(0)
-     return response_audio_io
-
-
- col1, col2 = st.columns([1, 5]) # Adjust the ratio to control the logo and title sizes
-
- # Display the logo in the first column
- with col1:
-     st.image("bolo_logo-removebg-preview.png", width=100) # Adjust the width as needed
-
- # Display the title in the second column
- with col2:
-     st.title("Urdu Virtual Assistant") # set the main title of the application
-     st.write("This application is a comprehensive speech-to-speech model designed to understand and respond in Urdu. It not only handles natural conversations but also has the capability to access and provide real-time information by integrating with the Tavily search engine. Whether you're asking for the weather or engaging in everyday dialogue, this assistant delivers accurate and context-aware responses, all in Urdu.")
-
- # Add a text input box
- audio = audiorecorder()
-
- if len(audio) > 0:
-     # Save the audio to a file
-     audio.export("audio.wav", format="wav")
-
-     # Convert audio to text using the wav2vec2 model
-     with open("audio.wav", "rb") as f:
-         audio_bytes = f.read()
-
-     # Process the audio file
-     result = pipe("audio.wav")
-     user_query = result["text"]
-
-     with st.chat_message("Human"): # create the message box for human input
-         st.audio(audio.export().read()) # display the audio player
-         st.markdown(user_query)
-
-     # Get response from the LLM
-     response_text = infer(user_input=user_query)
-     response_audio = text_to_speech(response_text, lang='ur')
-
-     # Play the generated speech in the app
-     with st.chat_message("AI"):
-         st.audio(response_audio.read(), format='audio/mp3')
-         st.markdown(response_text)
 
+ import streamlit as st
+ from dotenv import load_dotenv
+ from audiorecorder import audiorecorder
+ from langchain_core.messages import HumanMessage, AIMessage
+ import requests
+ from transformers import pipeline
+ from gtts import gTTS
+ import io
+ from langchain_core.runnables.base import RunnableSequence
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_groq import ChatGroq
+ import os
+ import requests
+ from dotenv import load_dotenv
+ from langgraph.checkpoint.memory import MemorySaver
+ from langgraph.prebuilt import create_react_agent
+ from langchain_community.tools.tavily_search import TavilySearchResults
+
+ st.set_page_config(page_title="Urdu Virtual Assistant", page_icon="🤖") # set the page title and icon
+
+ # Load environment variables (if any)
+ load_dotenv()
+
+ user_id = "1" # example user id
+
+ llm = ChatGroq(
+     model="llama-3.1-70b-versatile",
+     temperature=0.3,
+     max_tokens=None,
+     timeout=None,
+     max_retries=5,
+     groq_api_key=os.getenv("GROQ_API_KEY")
+ )
+
+ search = TavilySearchResults(
+     max_results=2,
+ )
+ tools = [search]
+
+ agent_executor = create_react_agent(llm, tools)
+
+ # Initialize the wav2vec2 model for Urdu speech-to-text
+ pipe = pipeline("automatic-speech-recognition", model="kingabzpro/wav2vec2-large-xls-r-300m-Urdu")
+
+ def translate(target, text):
+     '''
+     Translates given text into target language
+
+     Parameters:
+         target (string): 2 character code to specify the target language.
+         text (string): Text to be translated.
+
+     Returns:
+         res (string): Translated text.
+     '''
+     url = "https://microsoft-translator-text.p.rapidapi.com/translate"
+
+     querystring = {"api-version":"3.0","profanityAction":"NoAction","textType":"plain", "to":target}
+
+     payload = [{ "Text": text }]
+     headers = {
+         "x-rapidapi-key": os.getenv("RAPIDAPI_LANG_TRANS"),
+         "x-rapidapi-host": "microsoft-translator-text.p.rapidapi.com",
+         "Content-Type": "application/json"
+     }
+
+     response = requests.post(url, json=payload, headers=headers, params=querystring)
+     res = response.json()
+     return res[0]["translations"][0]["text"]
+
+
+ def infer(user_input: str):
+     '''
+     Returns the translated response from the LLM in response to a user query.
+
+     Parameters:
+         user_id (string): User ID of a user.
+         user_input (string): User query.
+
+     Returns:
+         res (string): Returns a translated response from the LLM.
+     '''
+
+     user_input = translate("en", user_input) # translate user query to english
+
+     prompt = ChatPromptTemplate.from_messages( # define a prompt
+         [
+             (
+                 "system",
+                 "You are a compassionate and friendly AI virtual assistant. You will provide helpful answers to user queries using the provided tool to ensure the accuracy and relevance of your responses."
+             ),
+             ("human", "{user_input}")
+         ]
+     )
+
+     runnable = prompt | agent_executor # define a chain
+
+     conversation = RunnableSequence( # wrap the chain along with chat history and user input
+         runnable,
+     )
+
+     response = conversation.invoke( # invoke the chain by giving the user input and the chat history
+         {"user_input": user_input},
+     )
+
+     res = translate("ur", response["messages"][-1].content) # translate the response to Urdu
+     return res
+
+
+ def text_to_speech(text, lang='ur'):
+     '''
+     Converts text to speech using gTTS.
+
+     Parameters:
+         text (string): Text to be converted to speech.
+         lang (string): Language for the speech synthesis. Default is 'ur' (Urdu).
+     Returns:
+         response_audio_io (BytesIO): BytesIO object containing the audio data.
+     '''
+     tts = gTTS(text, lang=lang)
+     response_audio_io = io.BytesIO()
+     tts.write_to_fp(response_audio_io)
+     response_audio_io.seek(0)
+     return response_audio_io
+
+
+ col1, col2 = st.columns([1, 5]) # Adjust the ratio to control the logo and title sizes
+
+ # Display the logo in the first column
+ with col1:
+     st.image("bolo_logo-removebg-preview.png", width=100) # Adjust the width as needed
+
+ # Display the title in the second column
+ with col2:
+     st.title("Urdu Virtual Assistant") # set the main title of the application
+     st.write("This application is a comprehensive speech-to-speech model designed to understand and respond in Urdu. It not only handles natural conversations but also has the capability to access and provide real-time information by integrating with the Tavily search engine. Whether you're asking for the weather or engaging in everyday dialogue, this assistant delivers accurate and context-aware responses, all in Urdu.")
+
+ # Add a text input box
+ audio = audiorecorder()
+
+ if len(audio) > 0:
+     # Save the audio to a file
+     audio.export("audio.wav", format="wav")
+
+     # Convert audio to text using the wav2vec2 model
+     with open("audio.wav", "rb") as f:
+         audio_bytes = f.read()
+
+     # Process the audio file
+     result = pipe("audio.wav")
+     user_query = result["text"]
+
+     with st.chat_message("Human"): # create the message box for human input
+         st.audio(audio.export().read()) # display the audio player
+         st.markdown(user_query)
+
+     # Get response from the LLM
+     response_text = infer(user_input=user_query)
+     response_audio = text_to_speech(response_text, lang='ur')
+
+     # Play the generated speech in the app
+     with st.chat_message("AI"):
+         st.audio(response_audio.read(), format='audio/mp3')
+         st.markdown(response_text)