add files

- .env.sample                  +5   -0
- .gitignore                   +6   -0
- Dockerfile                   +11  -0
- app.py                       +210 -0
- chainlit.md                  +1   -0
- data/paul_graham_essays.txt  +0   -0
- requirements.txt             +132 -0
- solution_app.py              +190 -0
.env.sample
ADDED
@@ -0,0 +1,5 @@
+# !!! DO NOT UPDATE THIS FILE DIRECTLY. MAKE A COPY AND RENAME IT `.env` TO PROCEED !!! #
+HF_LLM_ENDPOINT="YOUR_LLM_ENDPOINT_URL_HERE"
+HF_EMBED_ENDPOINT="YOUR_EMBED_MODEL_ENDPOINT_URL_HERE"
+HF_TOKEN="YOUR_HF_TOKEN_HERE"
+# !!! DO NOT UPDATE THIS FILE DIRECTLY. MAKE A COPY AND RENAME IT `.env` TO PROCEED !!! #
.gitignore
ADDED
@@ -0,0 +1,6 @@
+.env
+__pycache__/
+.chainlit
+*.faiss
+*.pkl
+.files
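The `*.faiss` and `*.pkl` patterns appear to cover a locally persisted FAISS index (FAISS's `save_local` writes an `index.faiss` and an `index.pkl`), and `.files` is the directory Chainlit uses for user-uploaded files, so none of these local artifacts get committed.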
Dockerfile
ADDED
@@ -0,0 +1,11 @@
+FROM python:3.9
+RUN useradd -m -u 1000 user
+USER user
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+WORKDIR $HOME/app
+COPY --chown=user . $HOME/app
+COPY ./requirements.txt $HOME/app/requirements.txt
+RUN pip install -r requirements.txt
+COPY . .
+CMD ["chainlit", "run", "app.py", "--port", "7860"]
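Port 7860 in the CMD matches the port Hugging Face Spaces expects a containerized app to listen on; to try the image locally you would publish the same port (e.g. `docker run -p 7860:7860 <image>`).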
app.py
ADDED
@@ -0,0 +1,210 @@
+import os
+import chainlit as cl
+from dotenv import load_dotenv
+from operator import itemgetter
+from langchain_huggingface import HuggingFaceEndpoint
+from langchain_community.document_loaders import TextLoader
+from langchain_text_splitters import RecursiveCharacterTextSplitter
+from langchain_community.vectorstores import FAISS
+from langchain_huggingface import HuggingFaceEndpointEmbeddings
+from langchain_core.prompts import PromptTemplate
+from langchain.schema.output_parser import StrOutputParser
+from langchain.schema.runnable import RunnablePassthrough
+from langchain.schema.runnable.config import RunnableConfig
+from tqdm.asyncio import tqdm_asyncio
+import asyncio
+from tqdm.asyncio import tqdm
+
+# GLOBAL SCOPE - ENTIRE APPLICATION HAS ACCESS TO VALUES SET IN THIS SCOPE #
+# ---- ENV VARIABLES ---- #
+"""
+This will load our environment file (.env) if it is present.
+
+NOTE: Make sure that .env is in your .gitignore file - it is by default, but please ensure it remains there.
+"""
+load_dotenv()
+
+"""
+We will load our environment variables here.
+"""
+HF_LLM_ENDPOINT = os.environ["HF_LLM_ENDPOINT"]
+HF_EMBED_ENDPOINT = os.environ["HF_EMBED_ENDPOINT"]
+HF_TOKEN = os.environ["HF_TOKEN"]
+
+# ---- GLOBAL DECLARATIONS ---- #
+
+# -- RETRIEVAL -- #
+"""
+1. Load Documents from Text File
+2. Split Documents into Chunks
+3. Load HuggingFace Embeddings (remember to use the URL we set above)
+4. Index Files if they do not exist, otherwise load the vectorstore
+"""
+# 1. CREATE TEXT LOADER AND LOAD DOCUMENTS
+# NOTE: PAY ATTENTION TO THE PATH THEY ARE IN.
+text_loader = TextLoader("./data/paul_graham_essays.txt")
+documents = text_loader.load()
+
+# 2. CREATE TEXT SPLITTER AND SPLIT DOCUMENTS
+text_splitter = RecursiveCharacterTextSplitter(
+    chunk_size=1000, chunk_overlap=30)
+split_documents = text_splitter.split_documents(documents)
+
+# 3. LOAD HUGGINGFACE EMBEDDINGS
+hf_embeddings = HuggingFaceEndpointEmbeddings(
+    model=HF_EMBED_ENDPOINT,
+    task="feature-extraction",
+    huggingfacehub_api_token=HF_TOKEN
+)
+
+
+async def add_documents_async(vectorstore, documents):
+    await vectorstore.aadd_documents(documents)
+
+
+async def process_batch(vectorstore, batch, is_first_batch, pbar):
+    # The first batch creates the FAISS index; every later batch is added to it.
+    if is_first_batch:
+        result = await FAISS.afrom_documents(batch, hf_embeddings)
+    else:
+        await add_documents_async(vectorstore, batch)
+        result = vectorstore
+    pbar.update(len(batch))
+    return result
+
+
+async def main():
+    print("Indexing Files")
+
+    vectorstore = None
+    batch_size = 32
+
+    batches = [split_documents[i:i+batch_size]
+               for i in range(0, len(split_documents), batch_size)]
+
+    async def process_all_batches():
+        nonlocal vectorstore
+        tasks = []
+        pbars = []
+
+        for i, batch in enumerate(batches):
+            pbar = tqdm(total=len(batch), desc=f"Batch {i+1}/{len(batches)}", position=i)
+            pbars.append(pbar)
+
+            if i == 0:
+                vectorstore = await process_batch(None, batch, True, pbar)
+            else:
+                tasks.append(process_batch(vectorstore, batch, False, pbar))
+
+        if tasks:
+            await asyncio.gather(*tasks)
+
+        for pbar in pbars:
+            pbar.close()
+
+    await process_all_batches()
+
+    hf_retriever = vectorstore.as_retriever()
+    print("\nIndexing complete. Vectorstore is ready for use.")
+    return hf_retriever
+
+
+async def run():
+    retriever = await main()
+    return retriever
+
+hf_retriever = asyncio.run(run())
+
+# -- AUGMENTED -- #
+"""
+1. Define a String Template
+2. Create a Prompt Template from the String Template
+"""
+# 1. DEFINE STRING TEMPLATE
+RAG_PROMPT_TEMPLATE = """\
+<|start_header_id|>system<|end_header_id|>
+You are a helpful assistant. You answer user questions based on provided context. If you can't answer the question with the provided context, say you don't know.<|eot_id|>
+
+<|start_header_id|>user<|end_header_id|>
+User Query:
+{query}
+
+Context:
+{context}<|eot_id|>
+
+<|start_header_id|>assistant<|end_header_id|>
+"""
+
+# 2. CREATE PROMPT TEMPLATE
+rag_prompt = PromptTemplate.from_template(RAG_PROMPT_TEMPLATE)
+
+# -- GENERATION -- #
+"""
+1. Create a HuggingFaceEndpoint for the LLM
+"""
+# 1. CREATE HUGGINGFACE ENDPOINT FOR LLM
+hf_llm = HuggingFaceEndpoint(
+    endpoint_url=HF_LLM_ENDPOINT,
+    max_new_tokens=512,
+    top_k=10,
+    top_p=0.95,
+    typical_p=0.95,
+    temperature=0.01,
+    repetition_penalty=1.03,
+    huggingfacehub_api_token=os.environ["HF_TOKEN"]
+)
+
+
+@cl.author_rename
+def rename(original_author: str):
+    """
+    This function can be used to rename the 'author' of a message.
+
+    In this case, we're overriding the 'Assistant' author to be 'Paul Graham Essay Bot'.
+    """
+    rename_dict = {
+        "Assistant": "Paul Graham Essay Bot"
+    }
+    return rename_dict.get(original_author, original_author)
+
+
+@cl.on_chat_start
+async def start_chat():
+    """
+    This function will be called at the start of every user session.
+
+    We will build our LCEL RAG chain here, and store it in the user session.
+
+    The user session is a dictionary that is unique to each user session, and is stored in the memory of the server.
+    """
+
+    # BUILD LCEL RAG CHAIN THAT ONLY RETURNS TEXT
+    lcel_rag_chain = (
+        {"context": itemgetter("query") | hf_retriever,
+         "query": itemgetter("query")}
+        | rag_prompt | hf_llm
+    )
+
+    cl.user_session.set("lcel_rag_chain", lcel_rag_chain)
+
+
+@cl.on_message
+async def main(message: cl.Message):
+    """
+    This function will be called every time a message is received from a session.
+
+    We will use the LCEL RAG chain to generate a response to the user query.
+
+    The LCEL RAG chain is stored in the user session, and is unique to each user session - this is why we can access it here.
+    """
+    lcel_rag_chain = cl.user_session.get("lcel_rag_chain")
+
+    msg = cl.Message(content="")
+
+    async for chunk in lcel_rag_chain.astream(
+        {"query": message.content},
+        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
+    ):
+        await msg.stream_token(chunk)
+
+    await msg.send()
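A note on the chain shape: the dict at the head of `lcel_rag_chain` is coerced by LCEL into a parallel step, so the retriever receives the query while the raw query passes through alongside it. Below is a minimal sketch of the same wiring, runnable offline under the `langchain-core` pinned in requirements.txt; `fake_retriever` and `fake_llm` are hypothetical stand-ins, not part of this commit.

from operator import itemgetter

from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableLambda

# Stand-ins for hf_retriever and hf_llm so the wiring can be checked without live endpoints.
fake_retriever = RunnableLambda(lambda query: [f"<doc mentioning {query}>"])
fake_llm = RunnableLambda(lambda prompt_value: f"(echo) {prompt_value.to_string()[:80]}")

prompt = PromptTemplate.from_template("User Query:\n{query}\n\nContext:\n{context}")

# Same shape as in start_chat(): the dict fans the input out to both branches.
chain = (
    {"context": itemgetter("query") | fake_retriever, "query": itemgetter("query")}
    | prompt
    | fake_llm
)

print(chain.invoke({"query": "What did Paul Graham say about determination?"}))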
chainlit.md
ADDED
@@ -0,0 +1 @@
+# FILL OUT YOUR CHAINLIT MD HERE WITH A DESCRIPTION OF YOUR APPLICATION
data/paul_graham_essays.txt
ADDED
The diff for this file is too large to render.
requirements.txt
ADDED
@@ -0,0 +1,132 @@
+aiofiles==23.2.1
+aiohappyeyeballs==2.4.3
+aiohttp==3.10.8
+aiosignal==1.3.1
+annotated-types==0.7.0
+anyio==3.7.1
+async-timeout==4.0.3
+asyncer==0.0.2
+attrs==24.2.0
+bidict==0.23.1
+certifi==2024.8.30
+chainlit==0.7.700
+charset-normalizer==3.3.2
+click==8.1.7
+dataclasses-json==0.5.14
+Deprecated==1.2.14
+distro==1.9.0
+exceptiongroup==1.2.2
+faiss-cpu==1.8.0.post1
+fastapi==0.100.1
+fastapi-socketio==0.0.10
+filelock==3.16.1
+filetype==1.2.0
+frozenlist==1.4.1
+fsspec==2024.9.0
+googleapis-common-protos==1.65.0
+greenlet==3.1.1
+grpcio==1.66.2
+grpcio-tools==1.62.3
+h11==0.14.0
+h2==4.1.0
+hpack==4.0.0
+httpcore==0.17.3
+httpx==0.24.1
+huggingface-hub==0.25.1
+hyperframe==6.0.1
+idna==3.10
+importlib_metadata==8.4.0
+Jinja2==3.1.4
+jiter==0.5.0
+joblib==1.4.2
+jsonpatch==1.33
+jsonpointer==3.0.0
+langchain==0.3.0
+langchain-community==0.3.0
+langchain-core==0.3.1
+langchain-huggingface==0.1.0
+langchain-openai==0.2.0
+langchain-qdrant==0.1.4
+langchain-text-splitters==0.3.0
+langsmith==0.1.121
+Lazify==0.4.0
+MarkupSafe==2.1.5
+marshmallow==3.22.0
+mpmath==1.3.0
+multidict==6.1.0
+mypy-extensions==1.0.0
+nest-asyncio==1.6.0
+networkx==3.2.1
+numpy==1.26.4
+nvidia-cublas-cu12==12.1.3.1
+nvidia-cuda-cupti-cu12==12.1.105
+nvidia-cuda-nvrtc-cu12==12.1.105
+nvidia-cuda-runtime-cu12==12.1.105
+nvidia-cudnn-cu12==9.1.0.70
+nvidia-cufft-cu12==11.0.2.54
+nvidia-curand-cu12==10.3.2.106
+nvidia-cusolver-cu12==11.4.5.107
+nvidia-cusparse-cu12==12.1.0.106
+nvidia-nccl-cu12==2.20.5
+nvidia-nvjitlink-cu12==12.6.77
+nvidia-nvtx-cu12==12.1.105
+openai==1.51.0
+opentelemetry-api==1.27.0
+opentelemetry-exporter-otlp==1.27.0
+opentelemetry-exporter-otlp-proto-common==1.27.0
+opentelemetry-exporter-otlp-proto-grpc==1.27.0
+opentelemetry-exporter-otlp-proto-http==1.27.0
+opentelemetry-instrumentation==0.48b0
+opentelemetry-proto==1.27.0
+opentelemetry-sdk==1.27.0
+opentelemetry-semantic-conventions==0.48b0
+orjson==3.10.7
+packaging==23.2
+pillow==10.4.0
+portalocker==2.10.1
+protobuf==4.25.5
+pydantic==2.9.2
+pydantic-settings==2.5.2
+pydantic_core==2.23.4
+PyJWT==2.9.0
+PyMuPDF==1.24.10
+PyMuPDFb==1.24.10
+python-dotenv==1.0.1
+python-engineio==4.9.1
+python-graphql-client==0.4.3
+python-multipart==0.0.6
+python-socketio==5.11.4
+PyYAML==6.0.2
+qdrant-client==1.11.2
+regex==2024.9.11
+requests==2.32.3
+safetensors==0.4.5
+scikit-learn==1.5.2
+scipy==1.13.1
+sentence-transformers==3.1.1
+simple-websocket==1.0.0
+sniffio==1.3.1
+SQLAlchemy==2.0.35
+starlette==0.27.0
+sympy==1.13.3
+syncer==2.0.3
+tenacity==8.5.0
+threadpoolctl==3.5.0
+tiktoken==0.7.0
+tokenizers==0.20.0
+tomli==2.0.1
+torch==2.4.1
+tqdm==4.66.5
+transformers==4.45.1
+triton==3.0.0
+typing-inspect==0.9.0
+typing_extensions==4.12.2
+uptrace==1.26.0
+urllib3==2.2.3
+uvicorn==0.23.2
+watchfiles==0.20.0
+websockets==13.1
+wrapt==1.16.0
+wsproto==1.2.0
+yarl==1.13.1
+zipp==3.20.2
solution_app.py
ADDED
@@ -0,0 +1,190 @@
+import os
+import chainlit as cl
+from dotenv import load_dotenv
+from operator import itemgetter
+from langchain_huggingface import HuggingFaceEndpoint
+from langchain_community.document_loaders import TextLoader
+from langchain_text_splitters import RecursiveCharacterTextSplitter
+from langchain_community.vectorstores import FAISS
+from langchain_huggingface import HuggingFaceEndpointEmbeddings
+from langchain_core.prompts import PromptTemplate
+from langchain.schema.output_parser import StrOutputParser
+from langchain.schema.runnable import RunnablePassthrough
+from langchain.schema.runnable.config import RunnableConfig
+from tqdm.asyncio import tqdm_asyncio
+import asyncio
+from tqdm.asyncio import tqdm
+
+# GLOBAL SCOPE - ENTIRE APPLICATION HAS ACCESS TO VALUES SET IN THIS SCOPE #
+# ---- ENV VARIABLES ---- #
+"""
+This will load our environment file (.env) if it is present.
+
+NOTE: Make sure that .env is in your .gitignore file - it is by default, but please ensure it remains there.
+"""
+load_dotenv()
+
+"""
+We will load our environment variables here.
+"""
+HF_LLM_ENDPOINT = os.environ["HF_LLM_ENDPOINT"]
+HF_EMBED_ENDPOINT = os.environ["HF_EMBED_ENDPOINT"]
+HF_TOKEN = os.environ["HF_TOKEN"]
+
+# ---- GLOBAL DECLARATIONS ---- #
+
+# -- RETRIEVAL -- #
+"""
+1. Load Documents from Text File
+2. Split Documents into Chunks
+3. Load HuggingFace Embeddings (remember to use the URL we set above)
+4. Index Files if they do not exist, otherwise load the vectorstore
+"""
+document_loader = TextLoader("./data/paul_graham_essays.txt")
+documents = document_loader.load()
+
+text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=30)
+split_documents = text_splitter.split_documents(documents)
+
+hf_embeddings = HuggingFaceEndpointEmbeddings(
+    model=HF_EMBED_ENDPOINT,
+    task="feature-extraction",
+    huggingfacehub_api_token=HF_TOKEN,
+)
+
+async def add_documents_async(vectorstore, documents):
+    await vectorstore.aadd_documents(documents)
+
+async def process_batch(vectorstore, batch, is_first_batch, pbar):
+    # The first batch creates the FAISS index; every later batch is added to it.
+    if is_first_batch:
+        result = await FAISS.afrom_documents(batch, hf_embeddings)
+    else:
+        await add_documents_async(vectorstore, batch)
+        result = vectorstore
+    pbar.update(len(batch))
+    return result
+
+async def main():
+    print("Indexing Files")
+
+    vectorstore = None
+    batch_size = 32
+
+    batches = [split_documents[i:i+batch_size] for i in range(0, len(split_documents), batch_size)]
+
+    async def process_all_batches():
+        nonlocal vectorstore
+        tasks = []
+        pbars = []
+
+        for i, batch in enumerate(batches):
+            pbar = tqdm(total=len(batch), desc=f"Batch {i+1}/{len(batches)}", position=i)
+            pbars.append(pbar)
+
+            if i == 0:
+                vectorstore = await process_batch(None, batch, True, pbar)
+            else:
+                tasks.append(process_batch(vectorstore, batch, False, pbar))
+
+        if tasks:
+            await asyncio.gather(*tasks)
+
+        for pbar in pbars:
+            pbar.close()
+
+    await process_all_batches()
+
+    hf_retriever = vectorstore.as_retriever()
+    print("\nIndexing complete. Vectorstore is ready for use.")
+    return hf_retriever
+
+async def run():
+    retriever = await main()
+    return retriever
+
+hf_retriever = asyncio.run(run())
+
+# -- AUGMENTED -- #
+"""
+1. Define a String Template
+2. Create a Prompt Template from the String Template
+"""
+RAG_PROMPT_TEMPLATE = """\
+<|start_header_id|>system<|end_header_id|>
+You are a helpful assistant. You answer user questions based on provided context. If you can't answer the question with the provided context, say you don't know.<|eot_id|>
+
+<|start_header_id|>user<|end_header_id|>
+User Query:
+{query}
+
+Context:
+{context}<|eot_id|>
+
+<|start_header_id|>assistant<|end_header_id|>
+"""
+
+rag_prompt = PromptTemplate.from_template(RAG_PROMPT_TEMPLATE)
+
+# -- GENERATION -- #
+"""
+1. Create a HuggingFaceEndpoint for the LLM
+"""
+hf_llm = HuggingFaceEndpoint(
+    endpoint_url=HF_LLM_ENDPOINT,
+    max_new_tokens=512,
+    top_k=10,
+    top_p=0.95,
+    temperature=0.3,
+    repetition_penalty=1.15,
+    huggingfacehub_api_token=HF_TOKEN,
+)
+
+@cl.author_rename
+def rename(original_author: str):
+    """
+    This function can be used to rename the 'author' of a message.
+
+    In this case, we're overriding the 'Assistant' author to be 'Paul Graham Essay Bot'.
+    """
+    rename_dict = {
+        "Assistant": "Paul Graham Essay Bot"
+    }
+    return rename_dict.get(original_author, original_author)
+
+@cl.on_chat_start
+async def start_chat():
+    """
+    This function will be called at the start of every user session.
+
+    We will build our LCEL RAG chain here, and store it in the user session.
+
+    The user session is a dictionary that is unique to each user session, and is stored in the memory of the server.
+    """
+
+    lcel_rag_chain = (
+        {"context": itemgetter("query") | hf_retriever, "query": itemgetter("query")}
+        | rag_prompt | hf_llm
+    )
+
+    cl.user_session.set("lcel_rag_chain", lcel_rag_chain)
+
+@cl.on_message
+async def main(message: cl.Message):
+    """
+    This function will be called every time a message is received from a session.
+
+    We will use the LCEL RAG chain to generate a response to the user query.
+
+    The LCEL RAG chain is stored in the user session, and is unique to each user session - this is why we can access it here.
+    """
+    lcel_rag_chain = cl.user_session.get("lcel_rag_chain")
+
+    msg = cl.Message(content="")
+
+    for chunk in await cl.make_async(lcel_rag_chain.stream)(
+        {"query": message.content},
+        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
+    ):
+        await msg.stream_token(chunk)
+
+    await msg.send()
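The solution file tracks app.py apart from two choices: the generation settings (temperature 0.3 with repetition_penalty 1.15, versus app.py's near-greedy 0.01/1.03) and the streaming call. app.py iterates the chain's native async `astream`, while the solution wraps the synchronous `stream` with Chainlit's `cl.make_async`, a helper Chainlit provides to run blocking calls off the event loop; awaiting the wrapped call yields the chunk generator, which the handler then iterates and streams token by token.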