KatGaw commited on
Commit
56c3d01
·
1 Parent(s): 51eb569

adding new files

Browse files
Files changed (7) hide show
  1. .DS_Store +0 -0
  2. Dockerfile +11 -0
  3. README.md +28 -4
  4. airbnb_10k_filings.pdf +0 -0
  5. app.py +163 -0
  6. chainlit.md +6 -0
  7. requirements.txt +10 -0
.DS_Store ADDED
Binary file (6.15 kB). View file
 
Dockerfile ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.9

# Run as a non-root user (required by Hugging Face Spaces).
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app

# Install dependencies first so this layer is cached across code-only changes.
# NOTE: the original `COPY ./requirements.txt ~/app/requirements.txt` was a
# bug — Dockerfile COPY does not expand `~`, so it created a literal `~` dir.
COPY --chown=user requirements.txt $HOME/app/requirements.txt
RUN pip install -r requirements.txt

# Copy the application code, owned by the non-root user. The original ended
# with a bare `COPY . .` that re-copied everything as root, clobbering the
# ownership set by the earlier --chown copy.
COPY --chown=user . $HOME/app

CMD ["chainlit", "run", "app.py", "--port", "7860"]
README.md CHANGED
@@ -1,11 +1,35 @@
1
  ---
2
- title: Airbnb New Space
3
- emoji: 🏆
4
  colorFrom: pink
5
  colorTo: yellow
6
  sdk: docker
7
  pinned: false
8
- license: openrail
9
  ---
10
 
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: Ekonez
3
+ emoji: 📉
4
  colorFrom: pink
5
  colorTo: yellow
6
  sdk: docker
7
  pinned: false
8
+ app_port: 7860
9
  ---
10
 
11
+ ## 🤖 Appka pro snadný přístup k daňovým zákonům
12
+
13
+ > Polož otázku o daních.
14
+
15
+ <p align="center" draggable="false"><img src="image.jpg"
16
+ width="200px"
17
+ height="auto"/>
18
+ </p>
19
+
20
+
21
+ ## Odpovědi čerpají z webu: https://www.zakonyprolidi.cz/
22
+
23
+ ---
24
+
25
+ <p align="center" draggable="false"><img src="https://github.com/AI-Maker-Space/LLM-Dev-101/assets/37101144/d1343317-fa2f-41e1-8af1-1dbb18399719"
26
+ width="200px"
27
+ height="auto"/>
28
+ </p>
29
+
30
+
31
+ ## <h1 align="center" id="heading">:wave: Welcome to Beyond ChatGPT!!</h1>
32
+
33
+ For a step-by-step YouTube video walkthrough, watch this! [Deploying Chainlit app on Hugging Face](https://www.youtube.com/live/pRbbZcL0NMI?si=NAYhMZ_suAY84f06&t=2119)
34
+
35
+ ![Beyond ChatGPT: Build Your First LLM Application](https://github.com/AI-Maker-Space/Beyond-ChatGPT/assets/48775140/cb7a74b8-28af-4d12-a008-8f5a51d47b4c)
airbnb_10k_filings.pdf ADDED
Binary file (596 kB). View file
 
app.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Chainlit RAG app that answers questions over the Airbnb 10-K filings PDF.

Loads environment variables, configures the OpenAI chat and embedding models
at import time, and (in the handlers below) builds one retrieval chain per
user session.
"""
import os
from operator import itemgetter

import chainlit as cl
import openai
import tiktoken
from dotenv import load_dotenv
from langchain.document_loaders import PyMuPDFLoader
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.runnable.config import RunnableConfig
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Qdrant
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_openai.embeddings import OpenAIEmbeddings

# GLOBAL SCOPE - ENTIRE APPLICATION HAS ACCESS TO VALUES SET IN THIS SCOPE #
# ---- ENV VARIABLES ---- #
# Load .env if present. NOTE: keep .env in .gitignore — it holds secrets.
# (The original imported `from dotenv import main` and called
# `main.load_dotenv()`; that `main` was later shadowed by the message
# handler, so we use the direct `load_dotenv` import instead.)
load_dotenv()

# The OpenAI key is read from the environment (.env or the host config).
openai.api_key = os.getenv("OPENAI_API_KEY")

# Chat model used to answer questions.
openai_chat_model = ChatOpenAI(model="gpt-4o")

# Embedding model used to index the PDF chunks.
embedding_model = OpenAIEmbeddings(model="text-embedding-3-small")
40
# ---- GLOBAL DECLARATIONS ---- #
@cl.on_chat_start
async def init():
    """Build the RAG chain for a new chat session.

    Called by Chainlit at the start of every user session:
    1. Load the 10-K PDF.
    2. Split it into token-bounded chunks.
    3. Embed and index the chunks in an in-memory Qdrant collection.
    4. Assemble the LCEL chain and store it in the user session.
    """
    # -- RETRIEVAL -- #
    docs = PyMuPDFLoader("airbnb_10k_filings.pdf").load()

    # Built once here: encoding_for_model is expensive and the encoding
    # never changes between calls (the original rebuilt it per chunk).
    encoding = tiktoken.encoding_for_model("gpt-4o")

    def tiktoken_len(text):
        # Chunk length measured in model tokens, not characters.
        return len(encoding.encode(text))

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=200,
        chunk_overlap=0,
        length_function=tiktoken_len,
    )
    split_chunks = text_splitter.split_documents(docs)

    # Embeddings and vector store (in-memory, so it is rebuilt per session).
    qdrant_vectorstore = Qdrant.from_documents(
        split_chunks,
        embedding_model,
        location=":memory:",
        collection_name="airbnb 10k filings",
    )
    print("Loaded Vectorstore")

    # Set up the retriever using LangChain.
    qdrant_retriever = qdrant_vectorstore.as_retriever()

    # -- AUGMENTED -- #
    RAG_PROMPT = """
CONTEXT:
{context}

QUERY:
{question}
Use the provided context to answer the provided user question. Only use the provided context to answer the question. If you do not know the answer, respond with "I don't know"
"""
    rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)

    # -- Our RAG Chain -- #
    # Invoke with {"question": "<user question>"}. Returns a dict with
    # "response" (the chat model message) and "context" (retrieved docs).
    # The original piped through a no-op
    # RunnablePassthrough.assign(context=itemgetter("context")) stage,
    # which reassigned "context" to itself; it is dropped here.
    lcel_rag_chain = (
        {
            "context": itemgetter("question") | qdrant_retriever,
            "question": itemgetter("question"),
        }
        | {
            "response": rag_prompt | openai_chat_model,
            "context": itemgetter("context"),
        }
    )

    # Stored per-session so the @cl.on_message handler can retrieve it.
    cl.user_session.set("lcel_rag_chain", lcel_rag_chain)
137
+
138
@cl.on_message
async def main(message: cl.Message):
    """Answer a user message with the session's RAG chain.

    Called by Chainlit every time a message is received. The LCEL RAG
    chain is stored in the user session by init(), which is why it can
    be fetched here — each session has its own chain instance.
    """
    lcel_rag_chain = cl.user_session.get("lcel_rag_chain")

    # Synchronous invoke; the chain returns {"response": <AIMessage>,
    # "context": <retrieved docs>} and we surface only the answer text.
    response = lcel_rag_chain.invoke({"question": message.content})
    await cl.Message(content=response["response"].content).send()
chainlit.md ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+
2
+ ## 🤖 Appka pro snadný přístup k daňovým zákonům
3
+ ![alt text](https://huggingface.co/spaces/KatGaw/Ekonez/resolve/main/image.jpg)
4
+ > Polož otázku o daních.
5
+
6
+ ## Odpovědi čerpají z webu: https://www.zakonyprolidi.cz/
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ chainlit==1.1.304
2
+ langchain>0.2.0
3
+ langchain_community==0.2.5
4
+ langchain_core==0.2.9
5
+ langchain_text_splitters==0.2.1
6
+ python-dotenv==1.0.1
7
+ langchain_openai==0.1.8
8
+ qdrant-client==1.9.2
9
+ tiktoken==0.7.0
10
+ pymupdf==1.24.5