import os

import gradio as gr
import pinecone
from openai import OpenAI

# Module-level clients: OpenAI for chat + embeddings, Pinecone for vector search.
# NOTE(review): pinecone.init()/pinecone.Index() is the legacy (pre-v3) client
# API — confirm the pinned pinecone-client version before upgrading.
openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
pinecone.init(
    api_key=os.getenv("PINECONE_API_TOKEN"),
    environment=os.getenv("PINECONE_ENVIRONMENT"),
)
index = pinecone.Index(os.getenv("PINECONE_INDEX"))


def init_prompt(type_prompt: str) -> str:
    """Load a prompt template from the prompts/ directory.

    Args:
        type_prompt: 'main' loads main_prompt.txt; any other value loads
            standalone_question.txt.

    Returns:
        The full text of the selected prompt file.

    Raises:
        OSError: if the prompt file cannot be read.
    """
    if type_prompt == "main":
        name_file = 'main_prompt.txt'
    else:
        name_file = 'standalone_question.txt'
    with open(f"prompts/{name_file}", mode='r', encoding='utf-8') as infile:
        prompt = infile.read()
    return prompt


def get_embedding(text: str) -> list[float]:
    """Return the OpenAI ada-002 embedding vector for *text*."""
    response = openai_client.embeddings.create(
        input=text,
        model='text-embedding-ada-002'
    )
    return response.data[0].embedding


def call_api(message_history: list[dict]) -> str:
    """Send *message_history* to the chat-completions endpoint and return the reply text.

    Args:
        message_history: OpenAI chat messages, each {'role': ..., 'content': ...}.

    Returns:
        The assistant's reply content.
    """
    response = openai_client.chat.completions.create(
        model='gpt-4-1106-preview',
        temperature=0.7,
        messages=message_history
    )
    return response.choices[0].message.content


def get_standalone_question(question: str, message_history: list[dict], prompt_q: str) -> str:
    """Rewrite *question* as a standalone question using the chat history.

    The history is flattened as 'Human: ...\\nAssistant: ...' lines and
    substituted into the HISTORY/QUESTION placeholders of *prompt_q*.

    Args:
        question: The user's latest (possibly context-dependent) message.
        message_history: Full chat history; index 0 is the system prompt and
            is skipped. Odd indices are assistant turns, even ones human turns
            (the greeting at index 1 is an assistant message).
        prompt_q: Template containing literal 'HISTORY' and 'QUESTION' markers.

    Returns:
        The model's rewritten, self-contained question.
    """
    history = ''
    for i, msg in enumerate(message_history):
        if i == 0:
            continue  # Skip the system prompt.
        if i % 2 == 0:
            history += f'Human: {msg["content"]}\n'
        else:
            history += f'Assistant: {msg["content"]}\n'
    # Substitute history and question into the template and call the model.
    prompt = [{'role': 'system', 'content': ''}]
    content = prompt_q.replace('HISTORY', history).replace('QUESTION', question)
    prompt[0]['content'] = content
    return call_api(prompt)


def get_context(question: str) -> str:
    """Retrieve supporting context for *question* from the Pinecone index.

    Embeds the question, fetches the 10 most similar vectors, and joins the
    'Text' field of each match's metadata into one newline-separated string.
    """
    q_embedding = get_embedding(question)
    # Get the most similar vectors.
    result = index.query(
        vector=q_embedding,
        top_k=10,
        include_metadata=True
    )['matches']
    # Create a string based on the text of each vector.
    context = ''
    for r in result:
        context += r['metadata']['Text'] + '\n'
    return context


def get_answer(context: str, message_history: list[dict], question: str, prompt_m: str) -> str:
    """Answer *question* grounded in *context*, mutating *message_history* in place.

    Rewrites the system prompt (index 0) with *context* substituted into the
    CONTEXT placeholder of *prompt_m*, appends the user question, and calls
    the chat API.
    """
    message_history[0]['content'] = prompt_m.replace('CONTEXT', context)
    message_history.append({'role': 'user', 'content': question})
    return call_api(message_history)


def ask_query(
        msg: str,
        chat_history: list[list[str | None]],
        message_history: list[dict],
        prompt_q: str,
        prompt_m: str
) -> tuple[str, list[list[str | None]], list[dict]]:
    """Handle one user turn: cap the conversation length or answer via RAG.

    Args:
        msg: The user's message.
        chat_history: Gradio-style [user, assistant] pairs; mutated in place.
        message_history: OpenAI-style message dicts; mutated in place.
        prompt_q: Standalone-question prompt template.
        prompt_m: Main answer prompt template (with CONTEXT placeholder).

    Returns:
        ('', updated chat_history, updated message_history) — the empty string
        clears the Gradio textbox.
    """
    # BUG FIX: was `== 5`, which only fired on the exact turn the history hit
    # five entries; afterwards the length passed 5 and answering resumed.
    # `>=` keeps the conversation ended once the cap is reached.
    if len(chat_history) >= 5:
        answer = 'Un placer haberte ayudado, hasta luego!'
        # NOTE(review): in this branch the user's message is never appended to
        # message_history, so the assistant entry below has no matching user
        # turn — confirm whether that asymmetry is intentional.
    else:
        question = get_standalone_question(msg, message_history, prompt_q)
        context = get_context(question)
        answer = get_answer(context, message_history, msg, prompt_m)
    message_history.append({'role': 'assistant', 'content': answer})
    chat_history.append([msg, answer])
    return "", chat_history, message_history


def start_chat(chat_history: list[list[str | None]], prompt_m: str):
    """Initialize a new conversation with the system prompt and a greeting.

    Args:
        chat_history: Gradio chat pairs; the greeting is appended in place.
        prompt_m: Main system prompt (CONTEXT placeholder still unfilled).

    Returns:
        (message_history, chat_history, hidden start button, visible textbox).
    """
    greeting = ('Hola 👋, ¡estoy encantada de conversar contigo! Antes de empezar, quiero asegurarte algo '
                'importante: tu privacidad y confidencialidad son mi máxima prioridad. Puedes estar '
                'tranquila sabiendo que nuestras conversaciones son completamente seguras y nunca '
                'serán compartidas con terceros. ¿En qué puedo ayudarte hoy?')
    message_history = [
        {'role': 'system', 'content': prompt_m},
        {'role': 'assistant', 'content': greeting}
    ]
    chat_history.append(['', greeting])
    return message_history, chat_history, gr.Button(visible=False), gr.Text(visible=True)