lingyit1108 committed
Commit 47e9340 • 1 Parent(s): 00561ea

Implement RAG in the Streamlit chatbot app

Files changed (3):
  1. archive/test.py +62 -0
  2. streamlit_app.py +82 -19
  3. streamlit_app_archive.py +137 -0
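In short, the commit replaces the app's raw OpenAI chat-completions call with a LlamaIndex context chat engine grounded in local PDF documents. The sketch below condenses that pattern from the code in this diff (legacy flat llama_index imports, as used throughout the commit; the question string at the end is illustrative only):

    # Condensed RAG sketch assembled from this commit's code.
    import openai
    from llama_index import Document, ServiceContext, SimpleDirectoryReader, VectorStoreIndex
    from llama_index.embeddings import HuggingFaceEmbedding
    from llama_index.llms import OpenAI
    from llama_index.memory import ChatMemoryBuffer

    openai.api_key = "sk-..."  # placeholder; supply a real key

    # Load the PDF and collapse it into one Document for indexing.
    docs = SimpleDirectoryReader(
        input_files=["./raw_documents/HI Chapter Summary Version 1.3.pdf"]
    ).load_data()
    document = Document(text="\n\n".join(doc.text for doc in docs))

    # Local embeddings + OpenAI LLM, indexed into an in-memory vector store.
    service_context = ServiceContext.from_defaults(
        llm=OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1),
        embed_model=HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5"),
    )
    index = VectorStoreIndex.from_documents([document], service_context=service_context)

    # Context chat engine: retrieves relevant chunks and keeps chat memory.
    chat_engine = index.as_chat_engine(
        chat_mode="context",
        memory=ChatMemoryBuffer.from_defaults(token_limit=15000),
    )
    for token in chat_engine.stream_chat("Summarise chapter 1.").response_gen:
        print(token, end="")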
archive/test.py ADDED
@@ -0,0 +1,62 @@
+import utils
+import os
+
+import openai
+from llama_index import SimpleDirectoryReader
+from llama_index import Document
+from llama_index import VectorStoreIndex
+from llama_index import ServiceContext
+from llama_index.llms import OpenAI
+
+from llama_index.embeddings import HuggingFaceEmbedding
+from trulens_eval import Tru
+from llama_index.memory import ChatMemoryBuffer
+
+from utils import get_prebuilt_trulens_recorder
+import time
+
+openai.api_key = utils.get_openai_api_key()
+
+documents = SimpleDirectoryReader(
+    input_files=["./raw_documents/HI_Knowledge_Base.pdf"]
+).load_data()
+
+document = Document(text="\n\n".join([doc.text for doc in documents]))
+
+### gpt-4-1106-preview
+### gpt-3.5-turbo-1106 / gpt-3.5-turbo
+print("Initializing GPT 3.5 ..")
+llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1)
+
+print("Initializing bge-small-en-v1.5 embedding model ..")
+embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
+
+print("Creating vector store ..")
+service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
+index = VectorStoreIndex.from_documents([document], service_context=service_context)
+
+if False:
+    query_engine = index.as_query_engine(streaming=True)
+else:
+    memory = ChatMemoryBuffer.from_defaults(token_limit=15000)
+
+    # chat_engine = index.as_query_engine(streaming=True)
+    chat_engine = index.as_chat_engine(
+        chat_mode="context",
+        memory=memory
+    )
+
+while True:
+    input_str = input("[User]: ")
+    if input_str == "END":
+        break
+
+    # res = chat_engine.query(input_str)
+    res = chat_engine.stream_chat(input_str)
+
+    bot_response = ""
+    print("[Bot]: ", end="")
+    for s in res.response_gen:
+        bot_response += s
+        print(s, end="")
+    print("")
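archive/test.py is a console REPL over the same engine: it builds the index once at startup, streams each answer token by token, and exits when the user types END. Assuming the PDF and an OpenAI key are in place, a session looks roughly like this (user and bot content elided):

    $ python archive/test.py
    Initializing GPT 3.5 ..
    Initializing bge-small-en-v1.5 embedding model ..
    Creating vector store ..
    [User]: <question about the knowledge base>
    [Bot]: <streamed answer>
    [User]: END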
streamlit_app.py CHANGED
@@ -3,7 +3,16 @@ import os
 import pandas as pd
 
 import openai
-from openai import OpenAI
+
+# from openai import OpenAI
+from llama_index.llms import OpenAI
+
+from llama_index import SimpleDirectoryReader
+from llama_index import Document
+from llama_index import VectorStoreIndex
+from llama_index import ServiceContext
+from llama_index.embeddings import HuggingFaceEmbedding
+from llama_index.memory import ChatMemoryBuffer
 
 import pkg_resources
 import shutil
@@ -35,6 +44,14 @@ shutil.copytree(ux_path, "./ux")
 st.set_page_config(page_title="💬 Open AI Chatbot")
 openai_api = os.getenv("OPENAI_API_KEY")
 
+# "./raw_documents/HI_Knowledge_Base.pdf"
+input_files = ["./raw_documents/HI Chapter Summary Version 1.3.pdf"]
+embedding_model = "BAAI/bge-small-en-v1.5"
+system_content = ("You are a helpful study assistant. "
+                  "You do not respond as 'User' or pretend to be 'User'. "
+                  "You only respond once as 'Assistant'."
+                  )
+
 data_df = pd.DataFrame(
     {
         "Completion": [30, 40, 100, 10],
@@ -83,6 +100,9 @@ with st.sidebar:
 
     st.markdown("📖 Reach out to SakiMilo to learn how to create this app!")
 
+if "init" not in st.session_state.keys():
+    st.session_state.init = {"warm_start": "No"}
+
 # Store LLM generated responses
 if "messages" not in st.session_state.keys():
     st.session_state.messages = [{"role": "assistant",
@@ -96,23 +116,66 @@ for message in st.session_state.messages:
 def clear_chat_history():
     st.session_state.messages = [{"role": "assistant",
                                   "content": "How may I assist you today?"}]
+    chat_engine = get_query_engine(input_files=input_files,
+                                   llm_model=selected_model,
+                                   temperature=temperature,
+                                   embedding_model=embedding_model,
+                                   system_content=system_content)
+    chat_engine.reset()
 st.sidebar.button("Clear Chat History", on_click=clear_chat_history)
 
-def generate_llm_response(client, prompt_input):
-    system_content = ("You are a helpful assistant. "
-                      "You do not respond as 'User' or pretend to be 'User'. "
-                      "You only respond once as 'Assistant'."
+@st.cache_resource
+def get_document_object(input_files):
+    documents = SimpleDirectoryReader(input_files=input_files).load_data()
+    document = Document(text="\n\n".join([doc.text for doc in documents]))
+    return document
+
+@st.cache_resource
+def get_llm_object(selected_model, temperature):
+    llm = OpenAI(model=selected_model, temperature=temperature)
+    return llm
+
+@st.cache_resource
+def get_embedding_model(model_name):
+    embed_model = HuggingFaceEmbedding(model_name=model_name)
+    return embed_model
+
+@st.cache_resource
+def get_query_engine(input_files, llm_model, temperature,
+                     embedding_model, system_content):
+
+    document = get_document_object(input_files)
+    llm = get_llm_object(llm_model, temperature)
+    embedded_model = get_embedding_model(embedding_model)
+
+    service_context = ServiceContext.from_defaults(llm=llm, embed_model=embedded_model)
+    index = VectorStoreIndex.from_documents([document], service_context=service_context)
+    memory = ChatMemoryBuffer.from_defaults(token_limit=15000)
+
+    # chat_engine = index.as_query_engine(streaming=True)
+    chat_engine = index.as_chat_engine(
+        chat_mode="context",
+        memory=memory,
+        system_prompt=system_content
     )
 
-    completion = client.chat.completions.create(
-        model=selected_model,
-        messages=[
-            {"role": "system", "content": system_content},
-        ] + st.session_state.messages,
-        temperature=temperature,
-        stream=True
-    )
-    return completion
+    return chat_engine
+
+def generate_llm_response(prompt_input):
+    chat_engine = get_query_engine(input_files=input_files,
+                                   llm_model=selected_model,
+                                   temperature=temperature,
+                                   embedding_model=embedding_model,
+                                   system_content=system_content)
+
+    # st.session_state.messages
+    response = chat_engine.stream_chat(prompt_input)
+    return response
+
+# Warm start
+if st.session_state.init["warm_start"] == "No":
+    clear_chat_history()
+    st.session_state.init["warm_start"] = "Yes"
 
 # User-provided prompt
 if prompt := st.chat_input(disabled=not openai_api):
@@ -125,13 +188,13 @@ if prompt := st.chat_input(disabled=not openai_api):
 if st.session_state.messages[-1]["role"] != "assistant":
     with st.chat_message("assistant"):
         with st.spinner("Thinking..."):
-            response = generate_llm_response(client, prompt)
+            # response = generate_llm_response(client, prompt)
+            response = generate_llm_response(prompt)
             placeholder = st.empty()
             full_response = ""
-            for chunk in response:
-                if chunk.choices[0].delta.content is not None:
-                    full_response += chunk.choices[0].delta.content
-                    placeholder.markdown(full_response)
+            for token in response.response_gen:
+                full_response += token
+                placeholder.markdown(full_response)
         placeholder.markdown(full_response)
     message = {"role": "assistant", "content": full_response}
     st.session_state.messages.append(message)
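A note on the design: Streamlit re-executes the whole script on every interaction, so without caching the PDF would be re-read and re-embedded on each chat message. The @st.cache_resource decorators memoise the document, LLM, embedding model, and chat engine per argument set, which is also why clear_chat_history() can fetch the same engine again just to reset() its memory. A minimal, self-contained illustration of that caching behaviour (toy names, not from the commit):

    import streamlit as st

    @st.cache_resource
    def build_engine(model_name: str):
        # Body runs once per distinct model_name; later calls and script
        # reruns get the same object back instead of rebuilding it.
        print(f"building engine for {model_name} ...")
        return object()  # stand-in for the index/chat engine

    engine_a = build_engine("gpt-3.5-turbo-1106")
    engine_b = build_engine("gpt-3.5-turbo-1106")
    assert engine_a is engine_b  # one shared, cached instance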
streamlit_app_archive.py ADDED
@@ -0,0 +1,137 @@
+import streamlit as st
+import os
+import pandas as pd
+
+import openai
+from openai import OpenAI
+
+import pkg_resources
+import shutil
+import main
+
+### To trigger trulens evaluation
+main.main()
+
+### Finally, start streamlit app
+leaderboard_path = pkg_resources.resource_filename(
+    "trulens_eval", "Leaderboard.py"
+)
+evaluation_path = pkg_resources.resource_filename(
+    "trulens_eval", "pages/Evaluations.py"
+)
+ux_path = pkg_resources.resource_filename(
+    "trulens_eval", "ux"
+)
+
+os.makedirs("./pages", exist_ok=True)
+shutil.copyfile(leaderboard_path, os.path.join("./pages", "1_Leaderboard.py"))
+shutil.copyfile(evaluation_path, os.path.join("./pages", "2_Evaluations.py"))
+
+if os.path.exists("./ux"):
+    shutil.rmtree("./ux")
+shutil.copytree(ux_path, "./ux")
+
+# App title
+st.set_page_config(page_title="💬 Open AI Chatbot")
+openai_api = os.getenv("OPENAI_API_KEY")
+
+data_df = pd.DataFrame(
+    {
+        "Completion": [30, 40, 100, 10],
+    }
+)
+data_df.index = ["Chapter 1", "Chapter 2", "Chapter 3", "Chapter 4"]
+
+# Replicate Credentials
+with st.sidebar:
+    st.title("💬 Open AI Chatbot")
+    st.write("This chatbot is created using the GPT model from Open AI.")
+    if openai_api:
+        pass
+    elif "OPENAI_API_KEY" in st.secrets:
+        st.success("API key already provided!", icon="✅")
+        openai_api = st.secrets["OPENAI_API_KEY"]
+    else:
+        openai_api = st.text_input("Enter OpenAI API token:", type="password")
+        if not (openai_api.startswith("sk-") and len(openai_api)==51):
+            st.warning("Please enter your credentials!", icon="⚠️")
+        else:
+            st.success("Proceed to entering your prompt message!", icon="👉")
+
+    ### for streamlit purpose
+    os.environ["OPENAI_API_KEY"] = openai_api
+
+    st.subheader("Models and parameters")
+    selected_model = st.sidebar.selectbox("Choose an OpenAI model",
+                                          ["gpt-3.5-turbo-1106", "gpt-4-1106-preview"],
+                                          key="selected_model")
+    temperature = st.sidebar.slider("temperature", min_value=0.01, max_value=2.0,
+                                    value=0.1, step=0.01)
+    st.data_editor(
+        data_df,
+        column_config={
+            "Completion": st.column_config.ProgressColumn(
+                "Completion %",
+                help="Percentage of content covered",
+                format="%.1f%%",
+                min_value=0,
+                max_value=100,
+            ),
+        },
+        hide_index=False,
+    )
+
+    st.markdown("📖 Reach out to SakiMilo to learn how to create this app!")
+
+# Store LLM generated responses
+if "messages" not in st.session_state.keys():
+    st.session_state.messages = [{"role": "assistant",
+                                  "content": "How may I assist you today?"}]
+
+# Display or clear chat messages
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.write(message["content"])
+
+def clear_chat_history():
+    st.session_state.messages = [{"role": "assistant",
+                                  "content": "How may I assist you today?"}]
+st.sidebar.button("Clear Chat History", on_click=clear_chat_history)
+
+def generate_llm_response(client, prompt_input):
+    system_content = ("You are a helpful assistant. "
+                      "You do not respond as 'User' or pretend to be 'User'. "
+                      "You only respond once as 'Assistant'."
+                      )
+
+    completion = client.chat.completions.create(
+        model=selected_model,
+        messages=[
+            {"role": "system", "content": system_content},
+        ] + st.session_state.messages,
+        temperature=temperature,
+        stream=True
+    )
+    return completion
+
+# User-provided prompt
+if prompt := st.chat_input(disabled=not openai_api):
+    client = OpenAI()
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    with st.chat_message("user"):
+        st.write(prompt)
+
+# Generate a new response if last message is not from assistant
+if st.session_state.messages[-1]["role"] != "assistant":
+    with st.chat_message("assistant"):
+        with st.spinner("Thinking..."):
+            response = generate_llm_response(client, prompt)
+            placeholder = st.empty()
+            full_response = ""
+            for chunk in response:
+                if chunk.choices[0].delta.content is not None:
+                    full_response += chunk.choices[0].delta.content
+                    placeholder.markdown(full_response)
+        placeholder.markdown(full_response)
+    message = {"role": "assistant", "content": full_response}
+    st.session_state.messages.append(message)
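For contrast with the new response_gen loop, the archived app streams straight from the OpenAI SDK, where each chunk carries an optional content delta rather than a plain token generator. A standalone sketch of that older pattern (model name taken from the file above; the prompt is illustrative):

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    stream = client.chat.completions.create(
        model="gpt-3.5-turbo-1106",
        messages=[{"role": "user", "content": "Say hello."}],
        stream=True,
    )
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta is not None:  # role-only / final chunks carry no text
            print(delta, end="")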