import dotenv
import gradio as gr

from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import DirectoryLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage, SystemMessage
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
# Read OPENAI_API_KEY (and any other settings) from a local .env file.
dotenv.load_dotenv()
# System prompt: the bot represents "ecredit" and must always answer in Greek.
# The closing sentence it is told to append means "for more information call: XXXXXXXXXXX.".
system_message = """You are a helpful assistant representing the company ecredit.
Your answers should be in Greek.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Always finish your answer with "για περισσότερες πληροφορίες καλέστε στο: XXXXXXXXXXX.".
"""
# Per-question prompt: the retrieved chunks are injected as {context}.
# "Δεν γνωρίζω" is Greek for "I don't know".
prompt_template = """Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Only answer questions that are related to the context. If it's not in the context, say "Δεν γνωρίζω".
Context:
{context}
Question: {question}
Answer in Greek:
"""
PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)
# Load every .txt file under ./documents and split it into ~500-character chunks.
loader = DirectoryLoader("./documents", glob="**/*.txt", show_progress=True)
docs = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
texts = text_splitter.split_documents(docs)

# Embed the chunks with OpenAI embeddings, index them in Chroma, and expose a retriever.
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings).as_retriever()

# Low temperature keeps the answers close to the retrieved context.
chat = ChatOpenAI(temperature=0.1)
# Optional smoke test: run a few typical questions (in Greek) through the retriever
# and the chat model and print the responses.
# potential_questions = [
#     "ποια ειναι η εταιρεια;",                # "what is the company?"
#     "ποια ειναι τα βηματα για ενα δανειο;",  # "what are the steps for a loan?"
#     "πως μπορω να κλεισω ραντεβου;",         # "how can I book an appointment?"
#     "γιατι να ερθώ σε εσας;",                # "why should I come to you?"
# ]
# for question in potential_questions:
#     result_docs = docsearch.get_relevant_documents(question)
#     print("Question: ", question)
#     print("-" * 20)
#     messages = [SystemMessage(content=system_message)]
#     human_message = HumanMessage(
#         content=PROMPT.format(context=result_docs[:3], question=question)
#     )
#     messages.append(human_message)
#     result = chat(messages)
#     print("Response: ", result.content, "\n")
# Minimal Gradio chat UI: each user message is answered with retrieval-augmented
# context from the Chroma index.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])

    def respond(message, chat_history):
        messages = [SystemMessage(content=system_message)]
        # Retrieve the most relevant chunks and pass the text of the top 3 as context.
        result_docs = docsearch.get_relevant_documents(message)
        human_message = HumanMessage(
            content=PROMPT.format(
                context="\n\n".join(doc.page_content for doc in result_docs[:3]),
                question=message,
            )
        )
        messages.append(human_message)
        result = chat(messages)
        bot_message = result.content
        chat_history.append((message, bot_message))
        return "", chat_history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])
if __name__ == "__main__":
    demo.launch()
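
# Expected local setup (a sketch; names other than ./documents are assumptions,
# not taken from the Space itself):
#
#   .env         ->  OPENAI_API_KEY=sk-...   (read by dotenv.load_dotenv() above)
#   documents/   ->  the .txt knowledge-base files picked up by DirectoryLoader
#
# Running the script directly (e.g. `python app.py`, assuming that filename)
# starts the Gradio demo on the default local port.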