Spaces:
Runtime error
Runtime error
Add option to give own prompt
Browse files- .gitignore +1 -1
- app.py +25 -3
- prompts/standalone_question.txt +7 -0
- utils.py +113 -0
.gitignore
CHANGED
@@ -10,4 +10,4 @@ data.json
|
|
10 |
audio.wav
|
11 |
video.mp4
|
12 |
|
13 |
-
prompts/
|
|
|
10 |
audio.wav
|
11 |
video.mp4
|
12 |
|
13 |
+
prompts/main_prompt.txt
|
app.py
CHANGED
@@ -1,13 +1,14 @@
|
|
1 |
-
from functions import *
|
2 |
from dotenv import load_dotenv
|
3 |
-
|
4 |
load_dotenv()
|
5 |
|
|
|
|
|
|
|
6 |
|
7 |
with (gr.Blocks() as app):
|
8 |
user_id = gr.State('') # id used to find the chat into the database
|
9 |
|
10 |
-
with gr.Tab('
|
11 |
with gr.Row() as select_author:
|
12 |
chat_btn = gr.Button(value='Start chat')
|
13 |
|
@@ -39,6 +40,27 @@ with (gr.Blocks() as app):
|
|
39 |
audio = gr.Audio(sources=['microphone'], type='filepath', label='Tell me your question')
|
40 |
button_audio = gr.Button(value='Submit audio')
|
41 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
42 |
# -------------------------------------- Actions -----------------------------------------
|
43 |
chat_btn.click(
|
44 |
make_invisible, None, select_author
|
|
|
|
|
1 |
from dotenv import load_dotenv
|
|
|
2 |
load_dotenv()
|
3 |
|
4 |
+
from utils import *
|
5 |
+
from functions import *
|
6 |
+
|
7 |
|
8 |
with (gr.Blocks() as app):
|
9 |
user_id = gr.State('') # id used to find the chat into the database
|
10 |
|
11 |
+
with gr.Tab('Option 1'):
|
12 |
with gr.Row() as select_author:
|
13 |
chat_btn = gr.Button(value='Start chat')
|
14 |
|
|
|
40 |
audio = gr.Audio(sources=['microphone'], type='filepath', label='Tell me your question')
|
41 |
button_audio = gr.Button(value='Submit audio')
|
42 |
|
43 |
+
with gr.Tab('Option 2'):
|
44 |
+
prompt_questions = gr.State(init_prompt('questions'))
|
45 |
+
prompt_main = gr.Textbox(label='Enter your prompt')
|
46 |
+
msg_history = gr.State([])
|
47 |
+
|
48 |
+
chatbot_option2 = gr.Chatbot(label='Bella Nosotras')
|
49 |
+
start_button = gr.Button()
|
50 |
+
message = gr.Textbox(visible=False)
|
51 |
+
|
52 |
+
start_button.click(
|
53 |
+
start_chat,
|
54 |
+
[chatbot_option2, prompt_main],
|
55 |
+
[msg_history, chatbot_option2, start_button, message]
|
56 |
+
)
|
57 |
+
|
58 |
+
message.submit(
|
59 |
+
ask_query,
|
60 |
+
[message, chatbot_option2, msg_history, prompt_questions, prompt_main],
|
61 |
+
[message, chatbot_option2, msg_history]
|
62 |
+
)
|
63 |
+
|
64 |
# -------------------------------------- Actions -----------------------------------------
|
65 |
chat_btn.click(
|
66 |
make_invisible, None, select_author
|
prompts/standalone_question.txt
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Your goal is to rephrase questions related to the brand Nosotras (which is a brand for feminine hygiene products) to show what is the best advice for the customer. Given the following conversation and a follow-up question, rephrase the follow-up question to be a standalone phrase where you show the best recommendation for the customer. Always include all the important information, especially all the names of the nouns and the intention of the customer. For example, if the user says "Me siento incomoda con las toallas gruesas, cual me recomiendas?" the standalone phrase should be something like "necesito toallas menos gruesas y más cómodas". There might be moments when there isn't a question; in those cases return a standalone phrase: for example, if the user says "hello" (or something similar) then the output would be "the user wants to say hello", or if the user says "thank you" (or something similar) then it would be "the user is saying thank you", or if the user says "great", "it is very helpful" (or something similar) then it would be "user thinks the recommendation is great". Your answer will always be in Spanish.
|
2 |
+
|
3 |
+
Chat History:
|
4 |
+
|
5 |
+
HISTORY
|
6 |
+
Follow-up Input: QUESTION
|
7 |
+
Standalone question:
|
utils.py
ADDED
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import pinecone
import gradio as gr
from openai import OpenAI

# Module-level clients shared by every helper below:
#   - openai_client: embeddings (get_embedding) and chat completions (call_api)
#   - index: Pinecone vector index queried by get_context
# All credentials/names come from environment variables.
# NOTE(review): app.py calls load_dotenv() before importing this module —
# these os.getenv() calls run at import time, so that ordering matters; confirm.
openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
pinecone.init(api_key=os.getenv("PINECONE_API_TOKEN"), environment=os.getenv("PINECONE_ENVIRONMENT"))
index = pinecone.Index(os.getenv("PINECONE_INDEX"))
9 |
+
|
10 |
+
|
def init_prompt(type_prompt: str) -> str:
    """Load a prompt template from the prompts/ directory.

    'main' selects main_prompt.txt; any other value falls back to
    standalone_question.txt (the condense-the-question template).
    """
    filename = 'main_prompt.txt' if type_prompt == "main" else 'standalone_question.txt'
    with open(f"prompts/{filename}", mode='r', encoding='utf-8') as handle:
        return handle.read()
21 |
+
|
22 |
+
|
def get_embedding(text: str) -> list[float]:
    """Return the ada-002 embedding vector for *text*."""
    result = openai_client.embeddings.create(
        input=text,
        model='text-embedding-ada-002'
    )
    # API returns a batch; a single input lives at position 0.
    return result.data[0].embedding
29 |
+
|
30 |
+
|
def call_api(message_history: list[dict]) -> str:
    """Send *message_history* to the chat model and return its reply text.

    Temperature 0.7 keeps answers conversational rather than deterministic.
    """
    completion = openai_client.chat.completions.create(
        model='gpt-4-1106-preview',
        messages=message_history,
        temperature=0.7
    )
    return completion.choices[0].message.content
38 |
+
|
39 |
+
|
def get_standalone_question(question: str, message_history: list[dict], prompt_q: str) -> str:
    """Condense the chat so far plus *question* into one standalone phrase.

    Builds a "Human:/Assistant:" transcript from *message_history* (index 0
    is the system prompt and is skipped; even indices are the user, odd ones
    the assistant), substitutes transcript and question into the template
    *prompt_q* (placeholders HISTORY / QUESTION), and asks the model for the
    rewritten standalone question.
    """
    parts = []
    for position, entry in enumerate(message_history):
        if position == 0:
            continue  # slot 0 holds the system prompt, not dialogue
        speaker = 'Human' if position % 2 == 0 else 'Assistant'
        parts.append(f'{speaker}: {entry["content"]}\n')
    transcript = ''.join(parts)

    # Fill the template and send it as a single system message.
    filled = prompt_q.replace('HISTORY', transcript).replace('QUESTION', question)
    return call_api([{'role': 'system', 'content': filled}])
57 |
+
|
58 |
+
|
def get_context(question: str) -> str:
    """Return the retrieval context for *question*.

    Embeds the question, fetches the 10 nearest vectors from the Pinecone
    index, and concatenates each match's stored 'Text' metadata, one per line.
    """
    matches = index.query(
        vector=get_embedding(question),
        top_k=10,
        include_metadata=True
    )['matches']

    # One snippet per line, in similarity order.
    return ''.join(match['metadata']['Text'] + '\n' for match in matches)
74 |
+
|
75 |
+
|
def get_answer(context: str, message_history: list[dict], question: str, prompt_m: str) -> str:
    """Answer *question* using the retrieved *context*.

    Side effects callers rely on: rewrites the system slot (index 0) of
    *message_history* with CONTEXT substituted into *prompt_m*, and appends
    the user turn, before calling the chat API.
    """
    message_history[0]['content'] = prompt_m.replace('CONTEXT', context)
    message_history.append({'role': 'user', 'content': question})
    return call_api(message_history)
80 |
+
|
81 |
+
|
82 |
+
def ask_query(
|
83 |
+
msg: str, chat_history: list[list[str | None]], message_history: list[dict], prompt_q: str, prompt_m: str
|
84 |
+
) -> tuple[str, list[list[str | None]], list[dict]]:
|
85 |
+
|
86 |
+
if len(chat_history) == 5:
|
87 |
+
answer = 'Un placer haberte ayudado, hasta luego!'
|
88 |
+
|
89 |
+
else:
|
90 |
+
question = get_standalone_question(msg, message_history, prompt_q)
|
91 |
+
context = get_context(question)
|
92 |
+
answer = get_answer(context, message_history, question, prompt_m)
|
93 |
+
|
94 |
+
message_history.append({'role': 'assistant', 'content': answer})
|
95 |
+
chat_history.append([msg, answer])
|
96 |
+
|
97 |
+
return "", chat_history, message_history
|
98 |
+
|
99 |
+
|
def start_chat(chat_history: list[list[str | None]], prompt_m: str):
    """Open the conversation for the 'own prompt' tab.

    Seeds both histories with a fixed privacy-reassurance greeting, hides the
    start button and reveals the message textbox.

    Returns:
        (message_history, chat_history, hidden start button, visible textbox)
    """
    greeting = ('Hola 👋, ¡estoy encantada de conversar contigo! Antes de empezar, quiero asegurarte algo '
                'importante: tu privacidad y confidencialidad son mi máxima prioridad. Puedes estar '
                'tranquila sabiendo que nuestras conversaciones son completamente seguras y nunca '
                'serán compartidas con terceros. ¿En qué puedo ayudarte hoy?')

    # Model-side history: the user-supplied prompt as system, greeting as the
    # first assistant turn.
    message_history = [
        {'role': 'system', 'content': prompt_m},
        {'role': 'assistant', 'content': greeting},
    ]
    # UI-side history: empty user slot so only the greeting is rendered.
    chat_history.append(['', greeting])

    return message_history, chat_history, gr.Button(visible=False), gr.Text(visible=True)