Commit bed46bd
Parent(s): a4aad6e

Midterm Prototype
Files changed:
- .chainlit/config.toml +84 -0
- .env +9 -0
- .gitattributes copy +35 -0
- .gitignore +1 -0
- BuildingAChainlitApp.md +111 -0
- Dockerfile +11 -0
- app.py +153 -0
- chainlit.md +5 -0
- requirements.txt +9 -0
.chainlit/config.toml
ADDED
@@ -0,0 +1,84 @@
[project]
# Whether to enable telemetry (default: true). No personal data is collected.
enable_telemetry = true

# List of environment variables to be provided by each user to use the app.
user_env = []

# Duration (in seconds) during which the session is saved when the connection is lost
session_timeout = 3600

# Enable third parties caching (e.g LangChain cache)
cache = false

# Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
follow_symlink = true

[features]
# Show the prompt playground
prompt_playground = true

# Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
unsafe_allow_html = false

# Process and display mathematical expressions. This can clash with "$" characters in messages.
latex = false

# Authorize users to upload files with messages
multi_modal = true

# Allows user to use speech to text
[features.speech_to_text]
enabled = false
# See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
# language = "en-US"

[UI]
# Name of the app and chatbot.
name = "Chatbot"

# Show the readme while the conversation is empty.
show_readme_as_default = true

# Description of the app and chatbot. This is used for HTML tags.
# description = ""

# Large size content are by default collapsed for a cleaner ui
default_collapse_content = true

# The default value for the expand messages settings.
default_expand_messages = false

# Hide the chain of thought details from the user in the UI.
hide_cot = false

# Link to your github repo. This will add a github button in the UI's header.
# github = ""

# Specify a CSS file that can be used to customize the user interface.
# The CSS file can be served from the public directory or via an external link.
# custom_css = "/public/test.css"

# Override default MUI light theme. (Check theme.ts)
[UI.theme.light]
#background = "#FAFAFA"
#paper = "#FFFFFF"

[UI.theme.light.primary]
#main = "#F80061"
#dark = "#980039"
#light = "#FFE7EB"

# Override default MUI dark theme. (Check theme.ts)
[UI.theme.dark]
#background = "#FAFAFA"
#paper = "#FFFFFF"

[UI.theme.dark.primary]
#main = "#F80061"
#dark = "#980039"
#light = "#FFE7EB"


[meta]
generated_by = "0.7.700"
.env
ADDED
@@ -0,0 +1,9 @@
OPENAI_API_KEY = "sk-proj-VlBPJdI2pTDAj1ZuTR1CT3BlbkFJdPixzv6JsSYgSdeU6iUL"
ANTHROPIC_API_KEY = "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"
SERPER_API_KEY = "e1433dbffb60ed98eb675bf110a3af71075787f7"
PINECONE_API_KEY = "02477b08-f824-4cda-accc-1104bd9c2268"
LANGCHAIN_API_KEY = "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"
LANGCHAIN_TRACING_V2=true
LANGCHAIN_ENDPOINT='https://api.smith.langchain.com'
TAVILY_API_KEY="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"
ZIPCODE_API_KEY="myBqK3rhFoNaVN2TKYWUdLplkZzOwh8uwA1GNqc35LocxwJEU10QohlJMCDbZkIx"
.gitattributes copy
ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1 @@
__pycache__/
BuildingAChainlitApp.md
ADDED
@@ -0,0 +1,111 @@
# Building a Chainlit App

What if we want to take our Week 1 Day 2 assignment - [Pythonic RAG](https://github.com/AI-Maker-Space/AIE4/tree/main/Week%201/Day%202) - and bring it out of the notebook?

Well - we'll cover exactly that here!

## Anatomy of a Chainlit Application

[Chainlit](https://docs.chainlit.io/get-started/overview) is a Python package, similar to Streamlit, that lets users write the backend and the front end in a single Python file (or several). It is mainly used for prototyping LLM-based chat-style applications - though it is used in production in some settings with millions of MAUs (Monthly Active Users).

The primary method of customizing and interacting with the Chainlit UI is through a few critical [decorators](https://blog.hubspot.com/website/decorators-in-python).

> NOTE: Simply put, the decorators (in Chainlit) are just ways we can "plug in" to Chainlit's functionality.

We'll be concerning ourselves with three main scopes:

1. On application start - when we start the Chainlit application with a command like `chainlit run app.py`
2. On chat start - when a chat session starts (a user opens the web browser to the address hosting the application)
3. On message - when the user sends a message through the input text box in the Chainlit UI

Let's dig into each scope and see what we're doing!
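Before we do - here's a minimal skeleton (a sketch, not our actual app) showing how those three scopes map onto Chainlit's decorators:

```python
import chainlit as cl

# 1. On application start: module-level code runs once,
#    when `chainlit run app.py` boots the server.
print("Application starting...")

@cl.on_chat_start  # 2. Runs once per new chat session
async def on_chat_start():
    await cl.Message(content="Welcome! Ask me anything.").send()

@cl.on_message  # 3. Runs every time the user sends a message
async def main(message: cl.Message):
    await cl.Message(content=f"You said: {message.content}").send()
```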

## On Application Start:

The first thing you'll notice is that we have the traditional "wall of imports" - this is to ensure we have everything we need to run our application.

```python
import os
from typing import List
from chainlit.types import AskFileResponse
from aimakerspace.text_utils import CharacterTextSplitter, TextFileLoader
from aimakerspace.openai_utils.prompts import (
    UserRolePrompt,
    SystemRolePrompt,
    AssistantRolePrompt,
)
from aimakerspace.openai_utils.embedding import EmbeddingModel
from aimakerspace.vectordatabase import VectorDatabase
from aimakerspace.openai_utils.chatmodel import ChatOpenAI
import chainlit as cl
```

Next up, we have some prompt templates. As all sessions will use the same prompt templates without modification, and we don't need these templates to be specific per session - we can set them up here, at the application scope.

```python
system_template = """\
Use the following context to answer a user's question. If you cannot find the answer in the context, say you don't know the answer."""
system_role_prompt = SystemRolePrompt(system_template)

user_prompt_template = """\
Context:
{context}

Question:
{question}
"""
user_role_prompt = UserRolePrompt(user_prompt_template)
```
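Under the hood, these templates are just Python format strings - later on, the pipeline fills them in with `create_message`, roughly like this (a sketch with hypothetical inputs; the real calls happen inside the chain below):

```python
# Hypothetical inputs, just to show how the templates get formatted
formatted_system_prompt = system_role_prompt.create_message()
formatted_user_prompt = user_role_prompt.create_message(
    question="What is retrieval augmented generation?",
    context="Retrieval augmented generation (RAG) pairs a retriever with an LLM...",
)
```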

> NOTE: You'll notice that these are the exact same prompt templates we used in the Pythonic RAG Notebook in Week 1 Day 2!

Following that - we can create the Python class definition for our RAG pipeline - or *chain*, as we'll refer to it in the rest of this walkthrough.

Let's look at the definition first:

```python
class RetrievalAugmentedQAPipeline:
    def __init__(self, llm: ChatOpenAI, vector_db_retriever: VectorDatabase) -> None:
        self.llm = llm
        self.vector_db_retriever = vector_db_retriever

    async def arun_pipeline(self, user_query: str):
        ### RETRIEVAL
        context_list = self.vector_db_retriever.search_by_text(user_query, k=4)

        context_prompt = ""
        for context in context_list:
            context_prompt += context[0] + "\n"

        ### AUGMENTED
        formatted_system_prompt = system_role_prompt.create_message()

        formatted_user_prompt = user_role_prompt.create_message(question=user_query, context=context_prompt)

        ### GENERATION
        async def generate_response():
            async for chunk in self.llm.astream([formatted_system_prompt, formatted_user_prompt]):
                yield chunk

        return {"response": generate_response(), "context": context_list}
```

Notice a few things:

1. We have modified this `RetrievalAugmentedQAPipeline` from the initial notebook to support streaming.
2. In essence, our pipeline is *chaining* a few events together:
   1. We take our user query and chain it into our vector database to collect related chunks
   2. We take those contexts and our user's question and chain them into the prompt templates
   3. We take that prompt template and chain it into our LLM call
   4. We chain the response of the LLM call to the user (see the sketch below)
3. We are using a lot of `async` again!

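To make the streaming concrete - here's roughly how a Chainlit message handler consumes the async generator our pipeline returns (a sketch based on the `on_message` handler in `app.py`):

```python
@cl.on_message
async def main(message: cl.Message):
    # The pipeline object is stashed in the user session at chat start
    chain = cl.user_session.get("chain")

    msg = cl.Message(content="")
    result = await chain.arun_pipeline(message.content)

    # Stream each chunk to the UI as soon as the LLM produces it
    async for chunk in result["response"]:
        await msg.stream_token(chunk)

    await msg.send()
```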

#### QUESTION #1:

Why do we want to support streaming? What about streaming is important, or useful?
Dockerfile
ADDED
@@ -0,0 +1,11 @@
FROM python:3.9
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app
COPY --chown=user . $HOME/app
# Note: COPY does not expand "~", so the destination uses $HOME explicitly
COPY ./requirements.txt $HOME/app/requirements.txt
RUN pip install -r requirements.txt
COPY . .
CMD ["chainlit", "run", "app.py", "--port", "7860"]
app.py
ADDED
@@ -0,0 +1,153 @@
from langchain.prompts import ChatPromptTemplate
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyMuPDFLoader
import chainlit as cl
import os
from langchain_qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from qdrant_client.http.models import Distance, VectorParams
from langchain_openai.chat_models import ChatOpenAI
from operator import itemgetter
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from uuid import uuid4

from dotenv import load_dotenv, find_dotenv

load_dotenv(find_dotenv())

base_rag_prompt_template = """\
Only answer questions that are related to the context provided. DO NOT answer a question if it is unrelated to the context provided. If you do not know the answer, say "I do not know the answer".

Context:
{context}

Question:
{question}
"""

base_rag_prompt = ChatPromptTemplate.from_template(base_rag_prompt_template)

recursiveChunker = RecursiveCharacterTextSplitter(
    chunk_size=600,
    chunk_overlap=60,
    length_function=len
)

LOCATION = ":memory:"
COLLECTION_NAME = "Ethical AI RAG"
VECTOR_SIZE = 384

model_name = "sentence-transformers/all-MiniLM-L6-v2"
untrained_embeddings = HuggingFaceEmbeddings(model_name=model_name)

rag_llm = ChatOpenAI(model="gpt-4o-mini")

class RetrievalAugmentedQAPipeline:
    def __init__(self, llm: ChatOpenAI, vector_db_retriever: QdrantVectorStore) -> None:
        self.chain = (
            {"context": itemgetter("question") | vector_db_retriever, "question": itemgetter("question")}
            | RunnablePassthrough.assign(context=itemgetter("context"))
            | {"response": base_rag_prompt | llm, "context": itemgetter("context")}
        )

    async def arun_pipeline(self, user_query: str):
        # Invoke the chain synchronously and get the result (a dict with "response" and "context")
        result = self.chain.invoke({"question": user_query})

        # Async generator that yields only the content of the response message;
        # iterating over the message object yields (field, value) pairs, so we
        # keep the value labeled "content" and ignore everything else
        async def generate_response():
            if isinstance(result, dict) and 'response' in result:
                for chunk in result['response']:
                    # If the chunk is a (label, value) tuple, yield the value when the label is "content"
                    if isinstance(chunk, tuple) and len(chunk) == 2:
                        label, content = chunk
                        if label == "content":
                            yield content
                        else:
                            # Ignore tuples that don't have 'content' as the label
                            continue
                    elif isinstance(chunk, str):
                        # If it's already a string, just yield it (edge case)
                        yield chunk
                    else:
                        # Ignore any non-string, non-tuple chunks
                        continue
            else:
                # If result does not contain the expected structure, just do nothing
                return

        # Return the async generator
        return generate_response()

@cl.on_chat_start
async def on_chat_start():
    all_documents = []
    pdf_urls = [
        'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf',
        'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf'
    ]

    msg = cl.Message(
        content="Loading documents...", disable_human_feedback=True
    )
    await msg.send()

    for url in pdf_urls:
        # Create a PyMuPDFLoader for that PDF
        loader = PyMuPDFLoader(file_path=url)
        # Load it into a list of documents (one per page)
        documents = loader.load()

        # Append each document to the all_documents list
        all_documents.extend(documents)

    print("Processing documents...")
    split_docs = recursiveChunker.split_documents(all_documents)

    # Create an in-memory Qdrant vector store
    qdrant_client = QdrantClient(LOCATION)

    qdrant_client.create_collection(
        collection_name=COLLECTION_NAME,
        vectors_config=VectorParams(size=VECTOR_SIZE, distance=Distance.COSINE),
    )

    qdrant_vector_store = QdrantVectorStore(
        client=qdrant_client,
        collection_name=COLLECTION_NAME,
        embedding=untrained_embeddings,
    )

    uuids = [str(uuid4()) for _ in range(len(split_docs))]

    qdrant_vector_store.add_documents(documents=split_docs, ids=uuids)

    retriever = qdrant_vector_store.as_retriever()

    # Create the chain
    retrieval_augmented_qa_pipeline = RetrievalAugmentedQAPipeline(
        vector_db_retriever=retriever,
        llm=rag_llm
    )

    # Let the user know that the system is ready
    msg.content = "Processing done. You can now ask questions!"
    await msg.update()

    cl.user_session.set("chain", retrieval_augmented_qa_pipeline)


@cl.on_message
async def main(message: cl.Message):
    chain = cl.user_session.get("chain")

    msg = cl.Message(content="")
    result = await chain.arun_pipeline(message.content)  # Returns the async generator

    # Stream each chunk of the response to the user
    async for stream_resp in result:  # Iterating over the async generator directly
        await msg.stream_token(stream_resp)

    await msg.send()
chainlit.md
ADDED
@@ -0,0 +1,5 @@
# Welcome to the AI Q+A App (Prototype)

With this application, you can ask questions about the following documents:
1. 2022: [Blueprint for an AI Bill of Rights: Making Automated Systems Work for the American People](https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf) (PDF)
2. 2024: [National Institute of Standards and Technology (NIST) Artificial Intelligence Risk Management Framework](https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf) (PDF)
requirements.txt
ADDED
@@ -0,0 +1,9 @@
chainlit==0.7.700
langchain
langchain_huggingface
langchain_community
langchain_openai
qdrant-client
langchain_qdrant
python-dotenv
PyMuPDF