advisor.py (ADDED, +83 -0)
import os

import gradio as gr
import numpy as np
import openai
import pandas as pd
import requests
from openai.embeddings_utils import get_embedding, cosine_similarity

import config

# NOTE: this script targets the pre-1.0 openai SDK (openai.Audio,
# openai.ChatCompletion, openai.embeddings_utils) and Gradio 3.x
# (source="microphone", .style()).
openai.api_key = config.OPENAI_API_KEY

# system prompt that constrains every reply from the advisor
messages = [{"role": "system", "content": 'You are a telecom advisor. Respond to all input in 50 words in dictionary format.'}]

# prepare Q&A embeddings dataframe: each stored embedding is a stringified
# list, so parse it back into a NumPy array for similarity math
question_df = pd.read_csv('data/questions_with_embeddings.csv')
question_df['embedding'] = question_df['embedding'].apply(eval).apply(np.array)


def transcribe(audio):
    global messages, question_df

    # the Whisper API now requires a file extension, so rename the temp file
    audio_filename_with_extension = audio + '.wav'
    os.rename(audio, audio_filename_with_extension)

    with open(audio_filename_with_extension, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)

    # embed the transcribed question and rank the stored questions by
    # cosine similarity; the top row holds the best-matching answer
    question_vector = get_embedding(transcript['text'], engine='text-embedding-ada-002')
    question_df["similarities"] = question_df['embedding'].apply(lambda x: cosine_similarity(x, question_vector))
    question_df = question_df.sort_values("similarities", ascending=False)

    best_answer = question_df.iloc[0]['answer']

    # ground the chat completion in the best-matching stored answer
    user_text = f"Using the following text, answer the question '{transcript['text']}'. {config.ADVISOR_CUSTOM_PROMPT}: {best_answer}"
    messages.append({"role": "user", "content": user_text})

    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)

    system_message = response["choices"][0]["message"]
    print(system_message)
    messages.append(system_message)

    # text-to-speech request with Eleven Labs; quote characters are
    # stripped from the reply text before synthesis
    url = f"https://api.elevenlabs.io/v1/text-to-speech/{config.ADVISOR_VOICE_ID}/stream"
    data = {
        "text": system_message["content"].replace('"', ''),
        "voice_settings": {
            "stability": 0.1,
            "similarity_boost": 0.8
        }
    }

    r = requests.post(url, headers={'xi-api-key': config.ELEVEN_LABS_API_KEY}, json=data)

    output_filename = "reply.mp3"
    with open(output_filename, "wb") as output:
        output.write(r.content)

    # build a readable transcript, skipping the system prompt
    chat_transcript = ""
    for message in messages:
        if message['role'] != 'system':
            chat_transcript += message['role'] + ": " + message['content'] + "\n\n"

    return chat_transcript, output_filename


# set a custom theme
theme = gr.themes.Default().set(
    body_background_fill="#000000",
)

with gr.Blocks(theme=theme) as ui:
    # advisor image and microphone input
    advisor = gr.Image(value=config.ADVISOR_IMAGE).style(width=config.ADVISOR_IMAGE_WIDTH, height=config.ADVISOR_IMAGE_HEIGHT)
    audio_input = gr.Audio(source="microphone", type="filepath")

    # text transcript output and spoken reply
    text_output = gr.Textbox(label="Conversation Transcript")
    audio_output = gr.Audio()

    btn = gr.Button("Run")
    btn.click(fn=transcribe, inputs=audio_input, outputs=[text_output, audio_output])

ui.launch(debug=True, share=True)
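
The script imports a local config module that is not part of this upload. Below is a minimal sketch of what it would need to expose: the attribute names are exactly the ones advisor.py reads, but every value is a placeholder, not the Space owner's real configuration.

# config.py -- hypothetical sketch; only the attribute names come from advisor.py
OPENAI_API_KEY = "sk-..."                  # OpenAI key (Whisper, embeddings, chat)
ELEVEN_LABS_API_KEY = "..."                # Eleven Labs key for text-to-speech
ADVISOR_VOICE_ID = "..."                   # Eleven Labs voice used for the reply
ADVISOR_CUSTOM_PROMPT = "Keep it simple"   # placeholder extra instruction per user turn
ADVISOR_IMAGE = "advisor.png"              # placeholder image shown above the mic input
ADVISOR_IMAGE_WIDTH = 300                  # placeholder dimensions
ADVISOR_IMAGE_HEIGHT = 300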
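
Likewise, data/questions_with_embeddings.csv is assumed to exist with at least an 'embedding' column (a stringified list) and an 'answer' column. Here is a hedged sketch of how such a file could be produced; the questions.csv input and its 'question' column are assumptions, but the embedding model matches the one transcribe() queries with, which is what keeps the cosine similarities meaningful.

# build_embeddings.py -- hypothetical preprocessing sketch, not part of the upload
import openai
import pandas as pd
from openai.embeddings_utils import get_embedding

import config

openai.api_key = config.OPENAI_API_KEY

# assumed input: a CSV of question/answer pairs (file and column names are assumptions)
qa_df = pd.read_csv('data/questions.csv')

# embed each question with the same model advisor.py uses at query time
qa_df['embedding'] = qa_df['question'].apply(
    lambda q: get_embedding(q, engine='text-embedding-ada-002')
)

# to_csv stores each embedding as its list repr, which advisor.py
# parses back with eval + np.array
qa_df.to_csv('data/questions_with_embeddings.csv', index=False)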