File size: 3,329 Bytes
06c7ad2
 
 
 
 
 
 
 
 
 
 
 
 
90793b1
06c7ad2
 
 
 
29ad627
06c7ad2
29ad627
 
06c7ad2
 
 
 
29ad627
06c7ad2
29ad627
06c7ad2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
744696f
06c7ad2
90793b1
70d9611
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90793b1
06c7ad2
 
 
 
 
 
 
c8b0adf
 
 
 
06c7ad2
c2b8838
06c7ad2
 
 
 
 
 
 
 
 
 
 
 
90793b1
06c7ad2
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.retrievers.multi_query import MultiQueryRetriever
import dotenv
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
import gradio as gr

# Load environment variables from a local .env file — presumably
# OPENAI_API_KEY, which the OpenAI clients below read from the
# environment (verify the .env contents before deploying).
dotenv.load_dotenv()


# System prompt used as the SystemMessage on every chat turn.
# NOTE(review): the phone number is a placeholder ("XXXXXXXXXXX") — replace
# with the real ecredit contact number before going live.
system_message = """You are the helpful assistant representing the company ecredit. 
Your answers should be in Greek. 
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Always finish your answer with "για περισσότερες πληροφορίες καλέστε στο: XXXXXXXXXXX.".
"""

# Per-question template: retrieved context fills {context}, the user's
# question fills {question}; the model is instructed to answer in Greek
# and to say "Δεν γνωρίζω" for out-of-context questions.
prompt_template = """Use the following pieces of context to answer the question at the end. 
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Only answer questions that are related to the context. If it's not in the context say "Δεν γνωρίζω".

Context:
{context}

Question: {question}
Answer in Greek:
"""
PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)

# Ingest every .txt file under ./documents and split it into ~500-character
# chunks with no overlap, ready for embedding.
loader = DirectoryLoader("./documents", glob="**/*.txt", show_progress=True)
docs = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
texts = text_splitter.split_documents(docs)

# Embed the chunks with OpenAI and index them in a Chroma store, exposed as
# a retriever for similarity search. No persist_directory is passed, so the
# index is presumably rebuilt from scratch on every run — confirm this is
# intended for production use.
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings).as_retriever()
# Low temperature to keep answers close to the retrieved context.
chat = ChatOpenAI(temperature=0.1)


# potential_questions = [
#     "ποια ειναι η εταιρεια;",
#     "ποια ειναι τα βηματα για ενα δανειο;",
#     "πως μπορω να κλεισω ραντεβου;",
#     "γιατι να ερθώ σε εσας;",
# ]

# for question in potential_questions:
#     result_docs = docsearch.get_relevant_documents(question)

#     print("Question: ", question)
#     print("-" * 20)

#     messages = [SystemMessage(content=system_message)]
#     human_message = HumanMessage(
#         content=PROMPT.format(context=result_docs[:3], question=question)
#     )
#     messages.append(human_message)
#     result = chat(messages)

#     print("Response: ", result, "\n")


# Minimal Gradio chat UI: one chatbot pane, one textbox, and a clear button.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])

    def respond(message, chat_history):
        """Answer one user turn via retrieval-augmented generation.

        Retrieves the most relevant document chunks for ``message``, formats
        the top three into PROMPT together with the question, sends the
        result (plus the system message) to the chat model, and appends the
        (user, bot) pair to ``chat_history``.

        Returns ("", chat_history) so Gradio clears the textbox and
        refreshes the chatbot component.
        """
        result_docs = docsearch.get_relevant_documents(message)
        # Join the chunks' text instead of interpolating raw Document
        # objects, so the model sees clean prose rather than Python reprs.
        # Keep only the top 3 chunks to bound prompt size.
        context = "\n\n".join(doc.page_content for doc in result_docs[:3])

        messages = [
            SystemMessage(content=system_message),
            HumanMessage(content=PROMPT.format(context=context, question=message)),
        ]

        result = chat(messages)
        chat_history.append((message, result.content))
        return "", chat_history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])


if __name__ == "__main__":
    # Launch the Gradio server; blocks until the process is stopped.
    demo.launch()