Daniel Foley committed
Commit 91b7015 · 1 Parent(s): 74eb75e

Sample Chainlit app placeholder


Co-authored-by: Dan <dfoley3838@gmail.com>
Co-authored-by: Brandon <bmv2021@bu.edu>
Co-authored-by: Enrico <enricoll@bu.edu>

Files changed (3)
  1. Dockerfile +1 -1
  2. app.py +79 -131
  3. requirements.txt +6 -4
Dockerfile CHANGED
@@ -62,7 +62,7 @@ COPY --chown=user ./requirements.txt requirements.txt
- RUN pip install --no-cache-dir --upgrade -r requirements.txt
+ RUN pip install --no-cache-dir -r requirements.txt
app.py CHANGED
@@ -1,265 +1,213 @@
- import chainlit as cl
- from typing import Optional
- import time
-
- # Store conversation history
- conversation_memory = []
-
- @cl.on_chat_start
- async def start():
-     """Initializes the chat session"""
-     # Send an initial message
-     await cl.Message(
-         content="👋 Hello! I'm your AI assistant. How can I help you today?",
-         author="Assistant"
-     ).send()
-
-     # Set some session variables
-     cl.user_session.set("conversation_started", True)
-
- @cl.on_message
- async def main(message: cl.Message):
-     """Main message handler"""
-     # Simulate some processing time
-     with cl.Step("Processing...") as step:
-         time.sleep(1)  # Simulated delay
-         step.output = "Processed message"
-
-     # Store message in conversation history
-     conversation_memory.append({
-         "role": "user",
-         "content": message.content
-     })
-
-     # Create a response
-     response = f"I received your message: '{message.content}'. This is a demo response."
-
-     # Store response in conversation history
-     conversation_memory.append({
-         "role": "assistant",
-         "content": response
-     })
-
-     # Send response with typing effect
-     await cl.Message(
-         content=response,
-         author="Assistant"
-     ).send()
-
- @cl.password_auth_callback
- def auth_callback(username: str, password: str) -> Optional[cl.User]:
-     """Basic authentication handler"""
-     # This is a simple example - in production, use proper authentication
-     if username == "demo" and password == "password":
-         return cl.User(identifier="demo", metadata={"role": "user"})
-     return None
-
- @cl.on_chat_end
- async def end():
-     """Cleanup when chat ends"""
-     await cl.Message(content="👋 Thank you for chatting! Goodbye!").send()
-
- # Custom action handler example
- @cl.action_callback("feedback")
- async def on_action(action):
-     """Handles custom feedback action"""
-     await cl.Message(content=f"Received feedback: {action.value}").send()
+ import os
+ from typing import List
+
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.vectorstores import Chroma
+ from langchain.chains import (
+     ConversationalRetrievalChain,
+ )
+ from langchain.chat_models import ChatOpenAI
+ from langchain.docstore.document import Document
+ from langchain.memory import ChatMessageHistory, ConversationBufferMemory
+
+ import chainlit as cl
+
+ os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
+
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
+
+
+ @cl.on_chat_start
+ async def on_chat_start():
+     files = None
+
+     # Wait for the user to upload a file
+     while files == None:
+         files = await cl.AskFileMessage(
+             content="Please upload a text file to begin!",
+             accept=["text/plain"],
+             max_size_mb=20,
+             timeout=180,
+         ).send()
+
+     file = files[0]
+
+     msg = cl.Message(content=f"Processing `{file.name}`...")
+     await msg.send()
+
+     with open(file.path, "r", encoding="utf-8") as f:
+         text = f.read()
+
+     # Split the text into chunks
+     texts = text_splitter.split_text(text)
+
+     # Create a metadata for each chunk
+     metadatas = [{"source": f"{i}-pl"} for i in range(len(texts))]
+
+     # Create a Chroma vector store
+     embeddings = OpenAIEmbeddings()
+     docsearch = await cl.make_async(Chroma.from_texts)(
+         texts, embeddings, metadatas=metadatas
+     )
+
+     message_history = ChatMessageHistory()
+
+     memory = ConversationBufferMemory(
+         memory_key="chat_history",
+         output_key="answer",
+         chat_memory=message_history,
+         return_messages=True,
+     )
+
+     # Create a chain that uses the Chroma vector store
+     chain = ConversationalRetrievalChain.from_llm(
+         ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, streaming=True),
+         chain_type="stuff",
+         retriever=docsearch.as_retriever(),
+         memory=memory,
+         return_source_documents=True,
+     )
+
+     # Let the user know that the system is ready
+     msg.content = f"Processing `{file.name}` done. You can now ask questions!"
+     await msg.update()
+
+     cl.user_session.set("chain", chain)
+
+
+ @cl.on_message
+ async def main(message: cl.Message):
+     chain = cl.user_session.get("chain")  # type: ConversationalRetrievalChain
+     cb = cl.AsyncLangchainCallbackHandler()
+
+     res = await chain.acall(message.content, callbacks=[cb])
+     answer = res["answer"]
+     source_documents = res["source_documents"]  # type: List[Document]
+
+     text_elements = []  # type: List[cl.Text]
+
+     if source_documents:
+         for source_idx, source_doc in enumerate(source_documents):
+             source_name = f"source_{source_idx}"
+             # Create the text element referenced in the message
+             text_elements.append(
+                 cl.Text(content=source_doc.page_content, name=source_name, display="side")
+             )
+         source_names = [text_el.name for text_el in text_elements]
+
+         if source_names:
+             answer += f"\nSources: {', '.join(source_names)}"
+         else:
+             answer += "\nNo sources found"
+
+     await cl.Message(content=answer, elements=text_elements).send()
 
requirements.txt CHANGED
@@ -1,9 +1,11 @@
- fastapi
- uvicorn[standard]
-
- chainlit
+ chainlit==1.3.0
+ langchain
+ langchain-community
+ pydantic==2.7.3
+ pydantic-settings==2.6.1
+ pydantic_core==2.18.4
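Since pydantic 2.x pins an exact pydantic_core version, the three pydantic pins above have to move together. A small standard-library sanity check that the installed versions match the pins, as an illustrative sketch (not part of the commit):

from importlib import metadata

# Pins copied from requirements.txt above; langchain and langchain-community
# are unpinned there, so only the exact pins are checked.
PINS = {
    "chainlit": "1.3.0",
    "pydantic": "2.7.3",
    "pydantic-settings": "2.6.1",
    "pydantic_core": "2.18.4",
}

for package, expected in PINS.items():
    try:
        installed = metadata.version(package)
    except metadata.PackageNotFoundError:
        print(f"{package}: NOT INSTALLED (expected {expected})")
        continue
    status = "ok" if installed == expected else f"MISMATCH (expected {expected})"
    print(f"{package}: {installed} {status}")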