Kaung Myat Htet committed on
Commit
b1ac1a0
·
1 Parent(s): 436b45e

add application file

Browse files
Files changed (2) hide show
  1. app.py +89 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import gradio as gr
from langchain_community.vectorstores import FAISS
from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings

from langchain_core.runnables.passthrough import RunnableAssign, RunnablePassthrough
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import get_buffer_string
# NOTE(review): NVIDIAEmbeddings is imported a second time here; harmless but
# redundant with the import above.
from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser


# Embedding model for querying the FAISS index — presumably the same model the
# index was built with (they must match for similarity search to be meaningful).
embedder = NVIDIAEmbeddings(model="nvolveqa_40k", model_type=None)
# NOTE(review): allow_dangerous_deserialization unpickles the saved index.
# Only safe because "vms_faiss_index" is a locally bundled artifact, never
# user-supplied input.
db = FAISS.load_local("vms_faiss_index", embedder, allow_dangerous_deserialization=True)

# docs = new_db.similarity_search(query)

# NOTE(review): read but never used anywhere below — ChatNVIDIA/NVIDIAEmbeddings
# appear to pick the key up from the environment themselves; verify before removing.
nvidia_api_key = os.environ.get("NVIDIA_API_KEY", "")


# NOTE(review): imported mid-file and never used below.
from operator import itemgetter


# available model names:
# mixtral_8x7b
# llama2_13b
# NOTE(review): `llm` is the chat model piped into a StrOutputParser, so it
# already emits plain str; the chains below pipe its output through a second
# StrOutputParser, which is a redundant identity pass.
llm = ChatNVIDIA(model="mixtral_8x7b") | StrOutputParser()
30
+
31
# Greeting shown in the chat window before the user has typed anything.
# Fix: dropped the f-prefix on the second literal — it contained no
# placeholders (ruff F541); the resulting string is unchanged.
initial_msg = (
    "Hello! I am VMS bot here to help you with your academic issues!"
    "\nHow can I help you?"
)
35
+
36
# System-side rules for the grounded-QA prompt. The {question} and {context}
# placeholders are filled by the chains below; the user turn repeats the raw
# question on its own.
_SYSTEM_RULES = (
    "You are a VMS chatbot, and you are helping students with their academic issues."
    "Answer the question using only the context provided. Do not include based on the context or based on the documents provided in your answer."
    "Please help them with their question. Remember that your job is to represent Vicent Mary School of Science and Technology (VMS) at Assumption University."
    "Do not hallucinate any details, and make sure the knowledge base is not redundant."
    "Please say you do not know if you do not know or you cannot find the information needed."
    "\n\nQuestion: {question}\n\nContext: {context}"
)

context_prompt = ChatPromptTemplate.from_messages([
    ("system", _SYSTEM_RULES),
    ("user", "{question}"),
])
46
+
47
# One-shot RAG chain: fan the incoming question out to {context, question},
# render the prompt, and run the model.
# Fix: dropped the trailing StrOutputParser() — `llm` is already piped into a
# StrOutputParser above, so its output is a plain str and the second parser
# was a redundant identity pass.
# NOTE(review): this chain is never invoked below — chat_gen uses conv_chain;
# kept for API parity / future use.
chain = (
    {
        "context": db.as_retriever(search_type="similarity"),
        "question": (lambda x: x),
    }
    | context_prompt
    | llm
)
57
+
58
# Streaming chain used by chat_gen: the caller supplies {question, context}
# directly (retrieval happens in chat_gen), so only prompt + model are needed.
# Fix: dropped the trailing StrOutputParser() — `llm` already ends in one and
# yields str, making the second parser a redundant identity pass.
conv_chain = context_prompt | llm
64
+
65
def chat_gen(message, history, return_buffer=True):
    """Stream a reply to *message*, grounded in retrieved documents.

    Args:
        message: The user's question.
        history: Prior chat turns; required by gr.ChatInterface but unused.
        return_buffer: When True (the default, what Gradio uses), yield the
            accumulated reply so far on every step. When False, yield each
            new token (or the full fallback message) instead.

    Yields:
        str: Growing reply text, or incremental pieces when return_buffer
        is False.
    """
    buffer = ""

    # Threshold retrieval so off-topic questions come back with no documents
    # instead of weakly related ones, letting us give an honest refusal.
    doc_retriever = db.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"score_threshold": 0.2},
    )
    retrieved_docs = doc_retriever.invoke(message)
    # Fix: removed leftover debug print() calls of the retrieved documents.

    if retrieved_docs:
        state = {
            "question": message,
            "context": retrieved_docs,
        }
        for token in conv_chain.stream(state):
            buffer += token
            # Fix: the original ignored return_buffer on this branch and
            # always yielded the buffer; now both branches honor it.
            yield buffer if return_buffer else token
    else:
        # Nothing relevant in the index — reply with a fixed refusal.
        passage = "I am sorry. I do not have relevant information to answer on that specific topic. Please try another question."
        buffer += passage
        yield buffer if return_buffer else passage
85
+
86
+
87
# Seed the chat window with the bot's greeting (None = no user turn yet).
chatbot = gr.Chatbot(value = [[None, initial_msg]])
# Wire the streaming generator into a Gradio chat UI; .queue() is what lets
# generator (streamed) responses render incrementally.
iface = gr.ChatInterface(chat_gen, chatbot=chatbot).queue()
iface.launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ langchain
2
+ langchain-nvidia-ai-endpoints
3
+ gradio
4
+ faiss-cpu