Update app.py
app.py CHANGED
@@ -1,13 +1,9 @@
-import nltk
-nltk.download('punkt_tab')
-
 import os
 from dotenv import load_dotenv
 import asyncio
-from
-from
-from
-from fastapi.middleware.cors import CORSMiddleware
+from flask import Flask, request, render_template
+from flask_cors import CORS
+from flask_socketio import SocketIO, emit, join_room, leave_room
 from langchain.chains import create_history_aware_retriever, create_retrieval_chain
 from langchain.chains.combine_documents import create_stuff_documents_chain
 from langchain_community.chat_message_histories import ChatMessageHistory
@@ -31,7 +27,6 @@ USER_AGENT = os.getenv("USER_AGENT")
 GROQ_API_KEY = os.getenv("GROQ_API_KEY")
 SECRET_KEY = os.getenv("SECRET_KEY")
 PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
-HUGGINGFACE_TOKEN = os.getenv("huggingface_api_key")
 SESSION_ID_DEFAULT = "abc123"
 
 # Set environment variables
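Side note on the configuration above: os.getenv() returns None for any variable missing from the environment or .env file, and the os.environ[...] assignments in the next hunk raise a bare TypeError in that case. A minimal guard sketch (not part of this commit; the variable list is taken from the code above):

    import os

    # Fail fast with a readable error instead of a TypeError downstream.
    REQUIRED = ["USER_AGENT", "GROQ_API_KEY", "SECRET_KEY", "PINECONE_API_KEY"]
    missing = [name for name in REQUIRED if not os.getenv(name)]
    if missing:
        raise RuntimeError(f"Missing environment variables: {', '.join(missing)}")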
@@ -39,19 +34,14 @@ os.environ['USER_AGENT'] = USER_AGENT
 os.environ["GROQ_API_KEY"] = GROQ_API_KEY
 os.environ["TOKENIZERS_PARALLELISM"] = 'true'
 
-# Initialize
-app =
-
-
-app.
-
-
-
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-
-templates = Jinja2Templates(directory="templates")
+# Initialize Flask app and SocketIO with CORS
+app = Flask(__name__)
+CORS(app)
+socketio = SocketIO(app, cors_allowed_origins="*")
+app.config['SESSION_COOKIE_SECURE'] = True  # Use HTTPS
+app.config['SESSION_COOKIE_HTTPONLY'] = True
+app.config['SESSION_COOKIE_SAMESITE'] = 'Lax'
+app.config['SECRET_KEY'] = SECRET_KEY
 
 # Function to initialize Pinecone connection
 def initialize_pinecone(index_name: str):
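The new Flask-SocketIO setup can be smoke-tested without a browser via the library's built-in test client. A self-contained sketch (assumes flask and flask_socketio are installed; the connect handler mirrors the one added further down):

    from flask import Flask
    from flask_socketio import SocketIO, emit

    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'test-only-key'
    socketio = SocketIO(app, cors_allowed_origins="*")

    @socketio.on('connect')
    def handle_connect():
        emit('connection_response', {'message': 'Connected successfully.'})

    # The test client connects immediately and records events emitted to it.
    client = socketio.test_client(app)
    assert client.is_connected()
    print(client.get_received())  # includes the 'connection_response' event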
@@ -62,6 +52,7 @@ def initialize_pinecone(index_name: str):
         print(f"Error initializing Pinecone: {e}")
         raise
 
+
 ##################################################
 ## Change down here
 ##################################################
@@ -73,29 +64,20 @@ bm25 = BM25Encoder().load("./UAE-NLA.json")
 ##################################################
 ##################################################
 
+# old_embed_model = HuggingFaceEmbeddings(model_name="sentence-transformers/gte-multilingual-base")
+
 # Initialize models and retriever
-embed_model = HuggingFaceEmbeddings(model_name="
+embed_model = HuggingFaceEmbeddings(model_name="Alibaba-NLP/gte-multilingual-base", model_kwargs={"trust_remote_code":True})
 retriever = PineconeHybridSearchRetriever(
     embeddings=embed_model,
     sparse_encoder=bm25,
     index=pinecone_index,
-    top_k=
-    alpha=0.5
+    top_k=20,
+    alpha=0.5
 )
 
-llm = ChatPerplexity(temperature=0, pplx_api_key=GROQ_API_KEY, model="llama-3.1-sonar-large-128k-chat", max_tokens=512, max_retries=2)
-
-
 # Initialize LLM
-
-
-# Initialize Reranker
-# model = HuggingFaceCrossEncoder(model_name="BAAI/bge-reranker-base")
-# compressor = CrossEncoderReranker(model=model, top_n=10)
-
-# compression_retriever = ContextualCompressionRetriever(
-#     base_compressor=compressor, base_retriever=retriever
-# )
+llm = ChatPerplexity(temperature=0, pplx_api_key=GROQ_API_KEY, model="llama-3.1-sonar-large-128k-chat", max_tokens=512, max_retries=2)
 
 # Contextualization prompt and retriever
 contextualize_q_system_prompt = """Given a chat history and the latest user question \
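For context on the new retriever arguments: top_k=20 asks for the 20 best matches, and alpha balances dense (semantic) similarity against sparse (BM25) similarity. Conceptually the hybrid score is a convex combination, illustrated here in plain Python (an illustration, not the library's internal code):

    def hybrid_score(dense: float, sparse: float, alpha: float = 0.5) -> float:
        # alpha=1.0 -> purely dense/semantic; alpha=0.0 -> purely sparse/keyword
        return alpha * dense + (1 - alpha) * sparse

    print(hybrid_score(0.82, 0.40))  # 0.61 with equal weighting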
@@ -113,27 +95,30 @@ contextualize_q_prompt = ChatPromptTemplate.from_messages(
 history_aware_retriever = create_history_aware_retriever(llm, retriever, contextualize_q_prompt)
 
 # QA system prompt and chain
-qa_system_prompt = """
-If you don't know the answer, simply state that you don't know.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+qa_system_prompt = """You are a highly skilled information retrieval assistant. Use the following context to answer questions effectively. \
+If you don't know the answer, simply state that you don't know. \
+Your answer should be in {language} language. \
+Provide answers in proper HTML format and keep them concise. \
+When responding to queries, follow these guidelines: \
+1. Provide Clear Answers: \
+   - Answer in the language of the question: if the question is in English, answer in English; if it is in Arabic, answer in Arabic. \
+   - Ensure the response directly addresses the query with accurate and relevant information. \
+2. Include Detailed References: \
+   - Links to Sources: Include URLs to credible sources where users can verify information or explore further. \
+   - Reference Sites: Mention specific websites or platforms that offer additional information. \
+   - Downloadable Materials: Provide links to any relevant downloadable resources if applicable. \
+
+3. Formatting for Readability: \
+   - The answer should be in proper HTML format with appropriate tags. \
+   - For Arabic-language responses, align the text to the right and convert numbers as well. \
+   - Double-check that the answer is in the correct language. \
+   - Use bullet points or numbered lists where applicable to present information clearly. \
+   - Highlight key details using bold or italics. \
+   - Provide proper and meaningful abbreviations for URLs; do not include naked URLs. \
+
+4. Organize Content Logically: \
+   - Structure the content in a logical order, ensuring easy navigation and understanding for the user. \
+
 {context}
 """
 qa_prompt = ChatPromptTemplate.from_messages(
@@ -143,9 +128,7 @@ qa_prompt = ChatPromptTemplate.from_messages(
         ("human", "{input}")
     ]
 )
-
-document_prompt = PromptTemplate(input_variables=["page_content", "source"], template="{page_content} \n\n Source: {source}")
-question_answer_chain = create_stuff_documents_chain(llm, qa_prompt, document_prompt=document_prompt)
+question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
 
 # Retrieval and Generative (RAG) Chain
 rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
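Dropping the document_prompt argument means create_stuff_documents_chain falls back to its default document template, which renders only page_content, so the "Source: {source}" suffix no longer reaches the LLM even though the QA prompt still asks for references. If that behavior is still wanted, the removed lines can be restored (sketch; llm and qa_prompt as defined above):

    from langchain_core.prompts import PromptTemplate
    from langchain.chains.combine_documents import create_stuff_documents_chain

    document_prompt = PromptTemplate(
        input_variables=["page_content", "source"],
        template="{page_content} \n\n Source: {source}",
    )
    question_answer_chain = create_stuff_documents_chain(
        llm, qa_prompt, document_prompt=document_prompt
    )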
@@ -153,6 +136,9 @@ rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chai
 # Chat message history storage
 store = {}
 
+def clean_temporary_data():
+    store.clear()
+
 def get_session_history(session_id: str) -> BaseChatMessageHistory:
     if session_id not in store:
         store[session_id] = ChatMessageHistory()
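Note that clean_temporary_data() calls store.clear(), which wipes the history of every session, not just the disconnecting client's; the removed FastAPI handler used store.pop(session_id, None) for exactly this. A per-session sketch (assumes the caller can supply the session id at disconnect time, which the new handlers do not currently track):

    def clean_session_data(session_id: str) -> None:
        # Remove only this session's history; no-op if the id is unknown.
        store.pop(session_id, None)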
@@ -168,69 +154,46 @@ conversational_rag_chain = RunnableWithMessageHistory(
     output_messages_key="answer",
 )
 
+# Function to handle WebSocket connection
+@socketio.on('connect')
+def handle_connect():
+    print(f"Client connected: {request.sid}")
+    emit('connection_response', {'message': 'Connected successfully.'})
+
+# Function to handle WebSocket disconnection
+@socketio.on('disconnect')
+def handle_disconnect():
+    print(f"Client disconnected: {request.sid}")
+    clean_temporary_data()
+
+# Function to handle WebSocket messages
+@socketio.on('message')
+def handle_message(data):
+    question = data.get('question')
+    language = data.get('language')
+    if "en" in language:
+        language = "English"
+    else:
+        language = "Arabic"
+    session_id = data.get('session_id', SESSION_ID_DEFAULT)
+    chain = conversational_rag_chain.pick("answer")
 
-# WebSocket endpoint with streaming
-@app.websocket("/ws")
-async def websocket_endpoint(websocket: WebSocket):
-    await websocket.accept()
-    print(f"Client connected: {websocket.client}")
-    session_id = None
     try:
-
-
-
-
-
-
-
-
-
-        # Process the question
-        try:
-            # Define an async generator for streaming
-            async def stream_response():
-                complete_response = ""
-                context = {}
-                async for chunk in conversational_rag_chain.astream(
-                    {"input": question, 'language': language},
-                    config={"configurable": {"session_id": session_id}}
-                ):
-                    if "context" in chunk:
-                        context = chunk['context']
-                    # Send each chunk to the client
-                    if "answer" in chunk:
-                        complete_response += chunk['answer']
-                        await websocket.send_json({'response': chunk['answer']})
-
-                if context:
-                    citations = re.findall(r'\[(\d+)\]', complete_response)
-                    citation_numbers = list(map(int, citations))
-                    sources = dict()
-                    backup = dict()
-                    i=1
-                    for index, doc in enumerate(context):
-                        if (index+1) in citation_numbers:
-                            sources[f"[{index+1}]"] = doc.metadata["source"]
-                        else:
-                            if doc.metadata["source"] not in backup.values():
-                                backup[f"[{i}]"] = doc.metadata["source"]
-                                i += 1
-                    if sources:
-                        await websocket.send_json({'sources': sources})
-                    else:
-                        await websocket.send_json({'sources': backup})
-
-            await stream_response()
-        except Exception as e:
-            print(f"Error during message handling: {e}")
-            await websocket.send_json({'response': "Something went wrong, Please try again." + str(e)})
-    except WebSocketDisconnect:
-        print(f"Client disconnected: {websocket.client}")
-        if session_id:
-            store.pop(session_id, None)
+        for chunk in chain.stream(
+            {"input": question, 'language': language},
+            config={"configurable": {"session_id": session_id}},
+        ):
+            emit('response', chunk, room=request.sid)
+    except Exception as e:
+        print(f"Error during message handling: {e}")
+        emit('response', {"error": "An error occurred while processing your request."}, room=request.sid)
+
 
 # Home route
-@app.
-
-    return
+@app.route("/")
+def index_view():
+    return render_template('chat.html')
 
+# Main function to run the app
+if __name__ == '__main__':
+    socketio.run(app, debug=True)
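End to end, the new handlers can be exercised with the python-socketio client package. A minimal sketch (assumes the server runs at Flask-SocketIO's default http://localhost:5000; the question text is only an example):

    import socketio

    sio = socketio.Client()

    @sio.on('response')
    def on_response(chunk):
        # The server streams the answer as a sequence of 'response' events.
        print(chunk, end='', flush=True)

    sio.connect('http://localhost:5000')
    sio.emit('message', {'question': 'What services are available?',
                         'language': 'en', 'session_id': 'abc123'})
    sio.sleep(10)  # allow time for the streamed reply
    sio.disconnect()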