Ritesh-hf commited on
Commit
51aad00
1 Parent(s): 043287c

update references

Browse files
Files changed (1) hide show
  1. app.py +17 -9
app.py CHANGED
@@ -23,6 +23,7 @@ from langchain_community.chat_models import ChatPerplexity
23
  from langchain.retrievers.document_compressors import CrossEncoderReranker
24
  from langchain_community.cross_encoders import HuggingFaceCrossEncoder
25
  from langchain_core.prompts import PromptTemplate
 
26
 
27
  # Load environment variables
28
  load_dotenv(".env")
@@ -125,15 +126,8 @@ When responding to queries, follow these guidelines:
125
  - Use emphasis on headings, important texts and phrases.
126
 
127
  3. Proper Citations and References:
128
- - ALWAYS INCLUDE SOURCES URLs where users can verify information or explore further.
129
- - Use inline citations with embed referenced source link in the format [1], [2], etc., in the response to reference sources.
130
- - ALWAYS PROVIDE "References" SECTION AT THE END OF RESPONSE.
131
- - In the "References" section, list the referenced sources with their urls in the following format
132
- 'References
133
- [1] Heading 1[Source 1 url] \
134
- [2] Heading 2[Source 2 url] \
135
- [3] Heading 3[Source 2 url] \
136
- '
137
 
138
  FOLLOW ALL THE GIVEN INSTRUCTIONS, FAILURE TO DO SO WILL RESULT IN TERMINATION OF THE CHAT.
139
 
@@ -192,14 +186,28 @@ async def websocket_endpoint(websocket: WebSocket):
192
  try:
193
  # Define an async generator for streaming
194
  async def stream_response():
 
 
195
  async for chunk in conversational_rag_chain.astream(
196
  {"input": question, 'language': language},
197
  config={"configurable": {"session_id": session_id}}
198
  ):
 
 
199
  # Send each chunk to the client
200
  if "answer" in chunk:
 
201
  await websocket.send_json({'response': chunk['answer']})
202
 
 
 
 
 
 
 
 
 
 
203
  await stream_response()
204
  except Exception as e:
205
  print(f"Error during message handling: {e}")
 
23
  from langchain.retrievers.document_compressors import CrossEncoderReranker
24
  from langchain_community.cross_encoders import HuggingFaceCrossEncoder
25
  from langchain_core.prompts import PromptTemplate
26
+ import re
27
 
28
  # Load environment variables
29
  load_dotenv(".env")
 
126
  - Use emphasis on headings, important texts and phrases.
127
 
128
  3. Proper Citations and References:
129
+ - ALWAYS USE INLINE CITATIONS with embedded source URLs where users can verify information or explore further.
130
+ - The citations should be in the format [1], [2], etc., in the response, with links to the referenced sources.
 
 
 
 
 
 
 
131
 
132
  FOLLOW ALL THE GIVEN INSTRUCTIONS, FAILURE TO DO SO WILL RESULT IN TERMINATION OF THE CHAT.
133
 
 
186
  try:
187
  # Define an async generator for streaming
188
  async def stream_response():
189
+ complete_response = ""
190
+ context = {}
191
  async for chunk in conversational_rag_chain.astream(
192
  {"input": question, 'language': language},
193
  config={"configurable": {"session_id": session_id}}
194
  ):
195
+ if "context" in chunk:
196
+ context = chunk['context']
197
  # Send each chunk to the client
198
  if "answer" in chunk:
199
+ complete_response += chunk['answer']
200
  await websocket.send_json({'response': chunk['answer']})
201
 
202
+ if context:
203
+ citations = re.findall(r'\[(\d+)\]', complete_response)
204
+ citation_numbers = list(map(int, citations))
205
+ sources = dict()
206
+ for index, doc in enumerate(context):
207
+ if (index+1) in citation_numbers:
208
+ sources[f"[{index+1}]"] = doc.metadata["source"]
209
+ await websocket.send_json({'sources': sources})
210
+
211
  await stream_response()
212
  except Exception as e:
213
  print(f"Error during message handling: {e}")