Vinh Nguyen committed on
Commit c65178d • 1 Parent(s): 2b35025

More proper code syntax

Files changed (1)
  1. app.py +35 -32
app.py CHANGED
@@ -1,8 +1,10 @@
 import streamlit as st
-from langchain.chains import ConversationalRetrievalChain
+from langchain.chains.conversational_retrieval.base import ConversationalRetrievalChain
 from langchain.memory import ConversationBufferMemory
-from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
-from langchain_community.chat_models import ChatOpenAI
+from langchain_community.chat_message_histories.streamlit import (
+    StreamlitChatMessageHistory,
+)
+from langchain_community.chat_models.openai import ChatOpenAI
 
 from calback_handler import PrintRetrievalHandler, StreamHandler
 from chat_profile import ChatProfileRoleEnum
@@ -75,35 +77,36 @@ with chat_tab:
     if uploaded_files:
         result_retriever = configure_retriever(uploaded_files)
 
-        memory = ConversationBufferMemory(
-            memory_key="chat_history",
-            chat_memory=msgs,
-            return_messages=True,
-        )
-
-        # Setup LLM and QA chain
-        llm = ChatOpenAI(
-            model_name=LLM_MODEL,
-            openai_api_key=openai_api_key,
-            temperature=0,
-            streaming=True,
-        )
-
-        chain = ConversationalRetrievalChain.from_llm(
-            llm,
-            retriever=result_retriever,
-            memory=memory,
-            verbose=False,
-            max_tokens_limit=4000,
-        )
-
-        avatars = {
-            ChatProfileRoleEnum.HUMAN: "user",
-            ChatProfileRoleEnum.AI: "assistant",
-        }
-
-        for msg in msgs.messages:
-            st.chat_message(avatars[msg.type]).write(msg.content)
+        if result_retriever is not None:
+            memory = ConversationBufferMemory(
+                memory_key="chat_history",
+                chat_memory=msgs,
+                return_messages=True,
+            )
+
+            # Setup LLM and QA chain
+            llm = ChatOpenAI(
+                model=LLM_MODEL,
+                api_key=openai_api_key,
+                temperature=0,
+                streaming=True,
+            )
+
+            chain = ConversationalRetrievalChain.from_llm(
+                llm,
+                retriever=result_retriever,
+                memory=memory,
+                verbose=False,
+                max_tokens_limit=4000,
+            )
+
+            avatars = {
+                str(ChatProfileRoleEnum.HUMAN): "user",
+                str(ChatProfileRoleEnum.AI): "assistant",
+            }
+
+            for msg in msgs.messages:
+                st.chat_message(avatars[msg.type]).write(msg.content)
 
     if not openai_api_key:
         st.caption("🔑 Add your **OpenAI API key** on the `Settings` to continue.")