import dotenv
import gradio as gr

from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import DirectoryLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage, SystemMessage
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

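# Load environment variables (e.g. the OpenAI API key) from a local .env file.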
dotenv.load_dotenv()


# System prompt that keeps the assistant on loan questions and answering in Greek.
system_message = """You are the helpful ecredit assistant bot.
You are here to help people with their questions about loans.
You only answer questions about loans. Never make up an answer.
Your answers should be in Greek.
Make them as helpful as possible.
Always finish your sentences with a period.
Always mention that customers can call ecredit for more information.
"""

prompt_template = """Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer.

{context}

Question: {question}
Answer in Greek:
"""
PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)

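# Load .txt documents from ./documents and split them into 500-character chunks.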
loader = DirectoryLoader("./documents", glob="**/*.txt", show_progress=True)
docs = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
texts = text_splitter.split_documents(docs)

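# Embed the chunks with OpenAI embeddings, index them in Chroma, and expose a retriever.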
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings).as_retriever()
chat = ChatOpenAI(temperature=0.9)

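# Minimal Gradio chat UI: a chat window, a message box, and a clear button.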
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])

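    # Chat callback: retrieve relevant context for the message and query the chat model.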
    def respond(message, chat_history):
        messages = [
            SystemMessage(content=system_message),
        ]

        # Retrieve the most relevant chunks and inject their text into the prompt.
        result_docs = docsearch.get_relevant_documents(message)
        context = "\n\n".join(doc.page_content for doc in result_docs[:3])
        human_message = HumanMessage(
            content=PROMPT.format(context=context, question=message)
        )
        messages.append(human_message)

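        # Query the chat model and append the exchange to the Gradio chat history.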
        result = chat(messages)
        bot_message = result.content
        chat_history.append((message, bot_message))
        return "", chat_history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])

if __name__ == "__main__":
    demo.launch()