import os

import pinecone
import gradio as gr
from openai import OpenAI

# OpenAI client (openai >= 1.0) and Pinecone index (legacy pinecone-client init-style API)
openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
pinecone.init(api_key=os.getenv("PINECONE_API_TOKEN"), environment=os.getenv("PINECONE_ENVIRONMENT"))
index = pinecone.Index(os.getenv("PINECONE_INDEX"))


def init_prompt(type_prompt: str) -> str:
    """Load a prompt template from the prompts/ folder."""
    if type_prompt == "main":
        name_file = 'main_prompt.txt'
    else:
        name_file = 'standalone_question.txt'

    with open(f"prompts/{name_file}", mode='r', encoding='utf-8') as infile:
        prompt = infile.read()

    return prompt
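
# The prompt templates themselves are not part of this file. As a rough sketch (an assumption, not
# the real file contents), they are expected to contain literal placeholders that get substituted
# further below: main_prompt.txt a CONTEXT placeholder, standalone_question.txt HISTORY and QUESTION
# placeholders. For example, standalone_question.txt could look something like:
#
#   Given the following conversation and a follow-up question, rephrase the follow-up
#   as a standalone question.
#   Conversation:
#   HISTORY
#   Follow-up question: QUESTION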


def get_embedding(text: str) -> list[float]:
    """Embed the text with OpenAI's text-embedding-ada-002 model."""
    response = openai_client.embeddings.create(
        input=text,
        model='text-embedding-ada-002'
    )
    return response.data[0].embedding


def call_api(message_history: list[dict]) -> str:
    """Send the message history to the chat model and return its reply."""
    response = openai_client.chat.completions.create(
        model='gpt-4-1106-preview',
        temperature=0.7,
        messages=message_history
    )
    return response.choices[0].message.content


def get_standalone_question(question: str, message_history: list[dict], prompt_q: str) -> str:
    """Rewrite the user's message as a standalone question, using the conversation history."""
    # Format the message history as: "Human: ...\nAssistant: ..."
    history = ''
    for i, msg in enumerate(message_history):
        if i == 0:
            continue  # Skip the system prompt
        if i % 2 == 0:
            history += f'Human: {msg["content"]}\n'
        else:
            history += f'Assistant: {msg["content"]}\n'

    # Insert the history and the question into the template and call the chat model
    content = prompt_q.replace('HISTORY', history).replace('QUESTION', question)
    prompt = [{'role': 'system', 'content': content}]

    return call_api(prompt)


def get_context(question: str) -> str:
    """Retrieve the most relevant passages from Pinecone for the given question."""
    q_embedding = get_embedding(question)

    # Get the most similar vectors
    result = index.query(
        vector=q_embedding,
        top_k=10,
        include_metadata=True
    )['matches']

    # Create a single context string from the text of each match
    context = ''
    for r in result:
        context += r['metadata']['Text'] + '\n'
    return context


def get_answer(context: str, message_history: list[dict], question: str, prompt_m: str) -> str:
    """Inject the retrieved context into the system prompt and answer the user's question."""
    message_history[0]['content'] = prompt_m.replace('CONTEXT', context)
    message_history.append({'role': 'user', 'content': question})
    return call_api(message_history)


def ask_query(
        msg: str, chat_history: list[list[str | None]], message_history: list[dict], prompt_q: str, prompt_m: str
) -> tuple[str, list[list[str | None]], list[dict]]:
    """Handle one user turn: rewrite the question, retrieve context and generate an answer."""
    if len(chat_history) == 5:
        # Close the conversation after five exchanges
        # ("A pleasure to have helped you, see you later!")
        answer = 'Un placer haberte ayudado, hasta luego!'
    else:
        question = get_standalone_question(msg, message_history, prompt_q)
        context = get_context(question)
        answer = get_answer(context, message_history, msg, prompt_m)

    message_history.append({'role': 'assistant', 'content': answer})
    chat_history.append([msg, answer])

    return "", chat_history, message_history


def start_chat(chat_history: list[list[str | None]], prompt_m: str):
    """Initialize the message history with the system prompt and post the opening greeting."""
    # Greeting (Spanish): "Hi 👋, I'm delighted to chat with you! Before we start, I want to assure you
    # of something important: your privacy and confidentiality are my top priority. You can rest easy
    # knowing that our conversations are completely secure and will never be shared with third
    # parties. How can I help you today?"
    greeting = ('Hola 👋, ¡estoy encantada de conversar contigo! Antes de empezar, quiero asegurarte algo '
                'importante: tu privacidad y confidencialidad son mi máxima prioridad. Puedes estar '
                'tranquila sabiendo que nuestras conversaciones son completamente seguras y nunca '
                'serán compartidas con terceros. ¿En qué puedo ayudarte hoy?')

    message_history = [
        {'role': 'system', 'content': prompt_m},
        {'role': 'assistant', 'content': greeting}
    ]

    chat_history.append(['', greeting])

    # Hide the start button and reveal the message textbox
    return message_history, chat_history, gr.Button(visible=False), gr.Text(visible=True)
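

# --- Hypothetical UI wiring (sketch) ---------------------------------------------------------------
# The Gradio interface that uses the callbacks above is not shown in this file. The block below is a
# minimal sketch of how they could be wired together, assuming a Blocks layout with a Chatbot, a
# hidden textbox and a start button; the component names (chatbot, msg_box, start_btn, ...) and the
# layout are illustrative, not the original app's.
if __name__ == '__main__':
    prompt_m = init_prompt('main')
    prompt_q = init_prompt('standalone')

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        msg_box = gr.Text(label='Mensaje', visible=False)
        start_btn = gr.Button('Empezar')

        message_state = gr.State([])         # message_history sent to the OpenAI API
        prompt_m_state = gr.State(prompt_m)
        prompt_q_state = gr.State(prompt_q)

        # Post the greeting, hide the start button and reveal the textbox
        start_btn.click(
            start_chat,
            inputs=[chatbot, prompt_m_state],
            outputs=[message_state, chatbot, start_btn, msg_box],
        )

        # Each submitted message runs the rewrite -> retrieve -> answer pipeline
        msg_box.submit(
            ask_query,
            inputs=[msg_box, chatbot, message_state, prompt_q_state, prompt_m_state],
            outputs=[msg_box, chatbot, message_state],
        )

    demo.launch()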