Vinh Nguyen committed on
Commit
1ec5b20
β€’
1 Parent(s): 9c5fb2e

Use all-mpnet-base-v2 vector embedding for highest performance

Browse files
Files changed (1) hide show
  1. app.py +18 -13
app.py CHANGED
@@ -1,19 +1,15 @@
1
  import streamlit as st
2
-
3
  from langchain.chains import ConversationalRetrievalChain
4
- from langchain.chat_models import ChatOpenAI
5
  from langchain.memory import ConversationBufferMemory
6
  from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
 
7
 
8
- from document_retriever import configure_retriever
9
  from calback_handler import PrintRetrievalHandler, StreamHandler
10
  from chat_profile import ChatProfileRoleEnum
11
-
12
- # configs
13
- LLM_MODEL_NAME = "gpt-3.5-turbo"
14
 
15
  st.set_page_config(
16
- page_title=":books: InkChatGPT: Chat with Documents",
17
  page_icon="πŸ“š",
18
  initial_sidebar_state="collapsed",
19
  menu_items={
@@ -36,7 +32,9 @@ with st.container():
36
  col1, col2 = st.columns([0.3, 0.8])
37
  with col1:
38
  st.image(
39
- "./assets/app_icon.png", use_column_width="always", output_format="PNG"
 
 
40
  )
41
  with col2:
42
  st.header(":books: InkChatGPT")
@@ -55,6 +53,7 @@ with documents_tab:
55
  label="Select files",
56
  type=["pdf", "txt", "docx", "epub"],
57
  accept_multiple_files=True,
 
58
  )
59
 
60
  with chat_tab:
@@ -62,23 +61,28 @@ with chat_tab:
62
  result_retriever = configure_retriever(uploaded_files)
63
 
64
  memory = ConversationBufferMemory(
65
- memory_key="chat_history", chat_memory=msgs, return_messages=True
 
 
66
  )
67
 
68
  # Setup LLM and QA chain
69
  llm = ChatOpenAI(
70
- model_name=LLM_MODEL_NAME,
71
  openai_api_key=openai_api_key,
72
  temperature=0,
73
  streaming=True,
74
  )
75
 
76
  chain = ConversationalRetrievalChain.from_llm(
77
- llm, retriever=result_retriever, memory=memory, verbose=False
 
 
 
78
  )
79
 
80
  avatars = {
81
- ChatProfileRoleEnum.Human: "user",
82
  ChatProfileRoleEnum.AI: "assistant",
83
  }
84
 
@@ -89,7 +93,8 @@ if not openai_api_key:
89
  st.caption("πŸ”‘ Add your **OpenAI API key** on the `Settings` to continue.")
90
 
91
  if user_query := st.chat_input(
92
- placeholder="Ask me anything!", disabled=(not openai_api_key)
 
93
  ):
94
  st.chat_message("user").write(user_query)
95
 
 
1
  import streamlit as st
 
2
  from langchain.chains import ConversationalRetrievalChain
 
3
  from langchain.memory import ConversationBufferMemory
4
  from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
5
+ from langchain_community.chat_models import ChatOpenAI
6
 
 
7
  from calback_handler import PrintRetrievalHandler, StreamHandler
8
  from chat_profile import ChatProfileRoleEnum
9
+ from document_retriever import configure_retriever
 
 
10
 
11
  st.set_page_config(
12
+ page_title="InkChatGPT: Chat with Documents",
13
  page_icon="πŸ“š",
14
  initial_sidebar_state="collapsed",
15
  menu_items={
 
32
  col1, col2 = st.columns([0.3, 0.8])
33
  with col1:
34
  st.image(
35
+ "./assets/app_icon.png",
36
+ use_column_width="always",
37
+ output_format="PNG",
38
  )
39
  with col2:
40
  st.header(":books: InkChatGPT")
 
53
  label="Select files",
54
  type=["pdf", "txt", "docx", "epub"],
55
  accept_multiple_files=True,
56
+ disabled=(not openai_api_key),
57
  )
58
 
59
  with chat_tab:
 
61
  result_retriever = configure_retriever(uploaded_files)
62
 
63
  memory = ConversationBufferMemory(
64
+ memory_key="chat_history",
65
+ chat_memory=msgs,
66
+ return_messages=True,
67
  )
68
 
69
  # Setup LLM and QA chain
70
  llm = ChatOpenAI(
71
+ model_name="gpt-3.5-turbo",
72
  openai_api_key=openai_api_key,
73
  temperature=0,
74
  streaming=True,
75
  )
76
 
77
  chain = ConversationalRetrievalChain.from_llm(
78
+ llm,
79
+ retriever=result_retriever,
80
+ memory=memory,
81
+ verbose=False,
82
  )
83
 
84
  avatars = {
85
+ ChatProfileRoleEnum.HUMAN: "user",
86
  ChatProfileRoleEnum.AI: "assistant",
87
  }
88
 
 
93
  st.caption("πŸ”‘ Add your **OpenAI API key** on the `Settings` to continue.")
94
 
95
  if user_query := st.chat_input(
96
+ placeholder="Ask me anything!",
97
+ disabled=(not openai_api_key),
98
  ):
99
  st.chat_message("user").write(user_query)
100