jonathanjordan21 commited on
Commit
b0b485a
·
verified ·
1 Parent(s): e94dc72

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +96 -0
app.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ # from langchain_community.llms import HuggingFaceTextGenInference
3
+ import os, pickle
4
+ from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
5
+ from langchain.schema import StrOutputParser
6
+
7
+ from custom_llm import CustomLLM, custom_chain_with_history, custom_combined_chain, custom_dataframe_chain, format_df,custom_unique_df_chain
8
+
9
+
10
+ API_TOKEN = os.getenv('HF_INFER_API')
11
+
12
+
13
+ from typing import Optional
14
+
15
+ from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
16
+ from langchain_community.chat_models import ChatAnthropic
17
+ from langchain_core.chat_history import BaseChatMessageHistory
18
+ from langchain.memory import ConversationBufferMemory
19
+ from langchain_core.runnables.history import RunnableWithMessageHistory
20
+
21
+
22
@st.cache_data
def get_df():
    """Load the ticket dataset from the bundled pickle file.

    Returns:
        The unpickled object (presumably a pandas DataFrame, given the
        `.head(4)` / `format_df` usage elsewhere -- TODO confirm).
    """
    # Original bug: pickle.load() was given a path string, but it requires
    # a binary file object -- that raised TypeError on every call.
    # NOTE(review): unpickling a bundled local file is acceptable, but
    # never point this at untrusted data.
    with open("ebesha_ticket_df.pkl", "rb") as f:
        return pickle.load(f)
25
+
26
@st.cache_data
def get_unique_values():
    """Ask the LLM chain for code that computes the df's unique values, run it.

    The chain emits Python source that is expected to assign a variable
    named ``response``; that value is returned.
    """
    generated_code = custom_unique_df_chain(
        llm=CustomLLM(
            repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
            model_type='text-generation',
            api_token=API_TOKEN,
            stop=["\n<|", "<|"],
        ),
        df=st.session_state.df,
    ).invoke({"df_example": format_df(st.session_state.df.head(4))})

    # Original bug: exec() inside a function cannot create real local
    # variables, so the following `return response` raised NameError.
    # Execute into an explicit locals dict and read `response` out of it.
    # HACK: executing LLM-generated code is inherently unsafe -- flagged
    # for review; kept only because the current design depends on it.
    local_vars = {}
    exec(generated_code, globals(), local_vars)
    return local_vars["response"]
30
+
31
+
32
@st.cache_resource
def get_llm_chain():
    """Build and cache the combined conversational chain.

    Wires together a dataframe-query chain and a history-aware chat chain,
    both backed by the same Mixtral inference endpoint.
    """
    # Share one LLM client between the sub-chains instead of constructing
    # two identical ones.
    llm = CustomLLM(
        repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
        model_type='text-generation',
        api_token=API_TOKEN,
        stop=["\n<|", "<|"],
    )
    dataframe_chain = custom_dataframe_chain(
        llm=llm,
        df=st.session_state.df,
        unique_values=st.session_state.unique_values,
    )
    memory_chain = custom_chain_with_history(llm=llm, memory=st.session_state.memory)
    # Original bugs: `llm` was referenced without ever being defined
    # (NameError), and the combiner called was custom_unique_df_chain,
    # whose call site above takes (llm, df) -- not these kwargs.
    # custom_combined_chain is imported and otherwise unused, so it is
    # presumably the intended combiner -- TODO confirm against custom_llm.
    return custom_combined_chain(
        llm=llm,
        dataframe_chain=dataframe_chain,
        memory_chain=memory_chain,
    )
37
+
38
# ---- Session-state bootstrap (runs once per browser session) ----------------

if 'memory' not in st.session_state:
    st.session_state['memory'] = ConversationBufferMemory(return_messages=True)
    # Seed the conversation so the model sees the greeting as prior context.
    st.session_state.memory.chat_memory.add_ai_message("Hello there! I'm AI assistant of Lintas Media Danawa. How can I help you today?")

if 'df' not in st.session_state:
    st.session_state['df'] = get_df()

if 'unique_values' not in st.session_state:
    st.session_state.unique_values = get_unique_values()

if 'chain' not in st.session_state:
    st.session_state['chain'] = get_llm_chain()

# ---- Page chrome ------------------------------------------------------------

# Original bug: the title string literal was missing its closing quote
# (st.title("LMD Chatbot V3) -- a SyntaxError that prevented the whole
# app from starting.
st.title("LMD Chatbot V3")
st.subheader("Combination of Ticket Submission and WI/User Guide Knowledge")

# Initialize the displayed chat history (separate from the LLM memory).
if "messages" not in st.session_state:
    st.session_state.messages = [{"role":"assistant", "content":"Hello there! I'm AI assistant of Lintas Media Danawa. How can I help you today?"}]

# Re-render the full transcript on every Streamlit rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# ---- Chat turn --------------------------------------------------------------

if prompt := st.chat_input("Ask me anything.."):
    # Echo the user's message and record it in the display history.
    st.chat_message("User").markdown(prompt)
    st.session_state.messages.append({"role": "User", "content": prompt})

    # Invoke the combined chain; strip everything after the model's stop
    # token ("\n<|") from the raw completion.
    full_response = st.session_state.chain.invoke({
        "question": prompt,
        "memory": st.session_state.memory,
        "df_example": format_df(st.session_state.df.head(4)),
    }).split("\n<|")[0]

    with st.chat_message("assistant"):
        st.markdown(full_response)

    # Persist the turn into the LLM memory and cap it at the last 15
    # messages so the prompt does not grow without bound.
    st.session_state.memory.save_context({"question": prompt}, {"output": full_response})
    st.session_state.memory.chat_memory.messages = st.session_state.memory.chat_memory.messages[-15:]
    st.session_state.messages.append({"role": "assistant", "content": full_response})